target/s390x: Use unwind data for helper_mvpg
[qemu.git] / target / s390x / translate.c
blobae298892788cb045e989f4555cadf7f65a1b7124
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS: verbose per-insn tracing, compiled out unless
   S390X_DEBUG_DISAS_VERBOSE is defined above. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
48 #include "exec/log.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
56 struct DisasContext {
57 struct TranslationBlock *tb;
58 const DisasInsn *insn;
59 DisasFields *fields;
60 uint64_t pc, next_pc;
61 enum cc_op cc_op;
62 bool singlestep_enabled;
65 /* Information carried about a condition to be evaluated. */
66 typedef struct {
67 TCGCond cond:8;
68 bool is_64;
69 bool g1;
70 bool g2;
71 union {
72 struct { TCGv_i64 a, b; } s64;
73 struct { TCGv_i32 a, b; } s32;
74 } u;
75 } DisasCompare;
#define DISAS_EXCP 4

/* Counters for inline-branch statistics, only compiled in when
   DEBUG_INLINE_BRANCHES is defined. */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
91 return pc;
94 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
95 int flags)
97 S390CPU *cpu = S390_CPU(cs);
98 CPUS390XState *env = &cpu->env;
99 int i;
101 if (env->cc_op > 3) {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
103 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
104 } else {
105 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
106 env->psw.mask, env->psw.addr, env->cc_op);
109 for (i = 0; i < 16; i++) {
110 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
111 if ((i % 4) == 3) {
112 cpu_fprintf(f, "\n");
113 } else {
114 cpu_fprintf(f, " ");
118 for (i = 0; i < 16; i++) {
119 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
120 if ((i % 4) == 3) {
121 cpu_fprintf(f, "\n");
122 } else {
123 cpu_fprintf(f, " ");
127 for (i = 0; i < 32; i++) {
128 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
129 env->vregs[i][0].ll, env->vregs[i][1].ll);
130 cpu_fprintf(f, (i % 2) ? "\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i = 0; i < 16; i++) {
135 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
136 if ((i % 4) == 3) {
137 cpu_fprintf(f, "\n");
138 } else {
139 cpu_fprintf(f, " ");
142 #endif
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i = 0; i < CC_OP_MAX; i++) {
146 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
147 inline_branch_miss[i], inline_branch_hit[i]);
149 #endif
151 cpu_fprintf(f, "\n");
154 static TCGv_i64 psw_addr;
155 static TCGv_i64 psw_mask;
156 static TCGv_i64 gbea;
158 static TCGv_i32 cc_op;
159 static TCGv_i64 cc_src;
160 static TCGv_i64 cc_dst;
161 static TCGv_i64 cc_vr;
163 static char cpu_reg_names[32][4];
164 static TCGv_i64 regs[16];
165 static TCGv_i64 fregs[16];
167 void s390x_translate_init(void)
169 int i;
171 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
172 tcg_ctx.tcg_env = cpu_env;
173 psw_addr = tcg_global_mem_new_i64(cpu_env,
174 offsetof(CPUS390XState, psw.addr),
175 "psw_addr");
176 psw_mask = tcg_global_mem_new_i64(cpu_env,
177 offsetof(CPUS390XState, psw.mask),
178 "psw_mask");
179 gbea = tcg_global_mem_new_i64(cpu_env,
180 offsetof(CPUS390XState, gbea),
181 "gbea");
183 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
184 "cc_op");
185 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
186 "cc_src");
187 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
188 "cc_dst");
189 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
190 "cc_vr");
192 for (i = 0; i < 16; i++) {
193 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
194 regs[i] = tcg_global_mem_new(cpu_env,
195 offsetof(CPUS390XState, regs[i]),
196 cpu_reg_names[i]);
199 for (i = 0; i < 16; i++) {
200 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
201 fregs[i] = tcg_global_mem_new(cpu_env,
202 offsetof(CPUS390XState, vregs[i][0].d),
203 cpu_reg_names[i + 16]);
207 static TCGv_i64 load_reg(int reg)
209 TCGv_i64 r = tcg_temp_new_i64();
210 tcg_gen_mov_i64(r, regs[reg]);
211 return r;
214 static TCGv_i64 load_freg32_i64(int reg)
216 TCGv_i64 r = tcg_temp_new_i64();
217 tcg_gen_shri_i64(r, fregs[reg], 32);
218 return r;
221 static void store_reg(int reg, TCGv_i64 v)
223 tcg_gen_mov_i64(regs[reg], v);
226 static void store_freg(int reg, TCGv_i64 v)
228 tcg_gen_mov_i64(fregs[reg], v);
231 static void store_reg32_i64(int reg, TCGv_i64 v)
233 /* 32 bit register writes keep the upper half */
234 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
237 static void store_reg32h_i64(int reg, TCGv_i64 v)
239 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
242 static void store_freg32_i64(int reg, TCGv_i64 v)
244 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
247 static void return_low128(TCGv_i64 dest)
249 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
252 static void update_psw_addr(DisasContext *s)
254 /* psw.addr */
255 tcg_gen_movi_i64(psw_addr, s->pc);
258 static void per_branch(DisasContext *s, bool to_next)
260 #ifndef CONFIG_USER_ONLY
261 tcg_gen_movi_i64(gbea, s->pc);
263 if (s->tb->flags & FLAG_MASK_PER) {
264 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
265 gen_helper_per_branch(cpu_env, gbea, next_pc);
266 if (to_next) {
267 tcg_temp_free_i64(next_pc);
270 #endif
273 static void per_branch_cond(DisasContext *s, TCGCond cond,
274 TCGv_i64 arg1, TCGv_i64 arg2)
276 #ifndef CONFIG_USER_ONLY
277 if (s->tb->flags & FLAG_MASK_PER) {
278 TCGLabel *lab = gen_new_label();
279 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
281 tcg_gen_movi_i64(gbea, s->pc);
282 gen_helper_per_branch(cpu_env, gbea, psw_addr);
284 gen_set_label(lab);
285 } else {
286 TCGv_i64 pc = tcg_const_i64(s->pc);
287 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
288 tcg_temp_free_i64(pc);
290 #endif
293 static void per_breaking_event(DisasContext *s)
295 tcg_gen_movi_i64(gbea, s->pc);
298 static void update_cc_op(DisasContext *s)
300 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
301 tcg_gen_movi_i32(cc_op, s->cc_op);
305 static void potential_page_fault(DisasContext *s)
307 update_psw_addr(s);
308 update_cc_op(s);
311 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
313 return (uint64_t)cpu_lduw_code(env, pc);
316 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
318 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
321 static int get_mem_index(DisasContext *s)
323 switch (s->tb->flags & FLAG_MASK_ASC) {
324 case PSW_ASC_PRIMARY >> 32:
325 return 0;
326 case PSW_ASC_SECONDARY >> 32:
327 return 1;
328 case PSW_ASC_HOME >> 32:
329 return 2;
330 default:
331 tcg_abort();
332 break;
336 static void gen_exception(int excp)
338 TCGv_i32 tmp = tcg_const_i32(excp);
339 gen_helper_exception(cpu_env, tmp);
340 tcg_temp_free_i32(tmp);
343 static void gen_program_exception(DisasContext *s, int code)
345 TCGv_i32 tmp;
347 /* Remember what pgm exeption this was. */
348 tmp = tcg_const_i32(code);
349 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
350 tcg_temp_free_i32(tmp);
352 tmp = tcg_const_i32(s->next_pc - s->pc);
353 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
354 tcg_temp_free_i32(tmp);
356 /* Advance past instruction. */
357 s->pc = s->next_pc;
358 update_psw_addr(s);
360 /* Save off cc. */
361 update_cc_op(s);
363 /* Trigger exception. */
364 gen_exception(EXCP_PGM);
367 static inline void gen_illegal_opcode(DisasContext *s)
369 gen_program_exception(s, PGM_OPERATION);
372 static inline void gen_trap(DisasContext *s)
374 TCGv_i32 t;
376 /* Set DXC to 0xff. */
377 t = tcg_temp_new_i32();
378 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
379 tcg_gen_ori_i32(t, t, 0xff00);
380 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
381 tcg_temp_free_i32(t);
383 gen_program_exception(s, PGM_DATA);
386 #ifndef CONFIG_USER_ONLY
387 static void check_privileged(DisasContext *s)
389 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
390 gen_program_exception(s, PGM_PRIVILEGED);
393 #endif
395 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
397 TCGv_i64 tmp = tcg_temp_new_i64();
398 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
400 /* Note that d2 is limited to 20 bits, signed. If we crop negative
401 displacements early we create larger immedate addends. */
403 /* Note that addi optimizes the imm==0 case. */
404 if (b2 && x2) {
405 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
406 tcg_gen_addi_i64(tmp, tmp, d2);
407 } else if (b2) {
408 tcg_gen_addi_i64(tmp, regs[b2], d2);
409 } else if (x2) {
410 tcg_gen_addi_i64(tmp, regs[x2], d2);
411 } else {
412 if (need_31) {
413 d2 &= 0x7fffffff;
414 need_31 = false;
416 tcg_gen_movi_i64(tmp, d2);
418 if (need_31) {
419 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
422 return tmp;
425 static inline bool live_cc_data(DisasContext *s)
427 return (s->cc_op != CC_OP_DYNAMIC
428 && s->cc_op != CC_OP_STATIC
429 && s->cc_op > 3);
432 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
434 if (live_cc_data(s)) {
435 tcg_gen_discard_i64(cc_src);
436 tcg_gen_discard_i64(cc_dst);
437 tcg_gen_discard_i64(cc_vr);
439 s->cc_op = CC_OP_CONST0 + val;
442 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
444 if (live_cc_data(s)) {
445 tcg_gen_discard_i64(cc_src);
446 tcg_gen_discard_i64(cc_vr);
448 tcg_gen_mov_i64(cc_dst, dst);
449 s->cc_op = op;
452 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
453 TCGv_i64 dst)
455 if (live_cc_data(s)) {
456 tcg_gen_discard_i64(cc_vr);
458 tcg_gen_mov_i64(cc_src, src);
459 tcg_gen_mov_i64(cc_dst, dst);
460 s->cc_op = op;
463 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
464 TCGv_i64 dst, TCGv_i64 vr)
466 tcg_gen_mov_i64(cc_src, src);
467 tcg_gen_mov_i64(cc_dst, dst);
468 tcg_gen_mov_i64(cc_vr, vr);
469 s->cc_op = op;
472 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
474 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
477 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
479 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
482 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
484 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
487 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
489 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
492 /* CC value is in env->cc_op */
493 static void set_cc_static(DisasContext *s)
495 if (live_cc_data(s)) {
496 tcg_gen_discard_i64(cc_src);
497 tcg_gen_discard_i64(cc_dst);
498 tcg_gen_discard_i64(cc_vr);
500 s->cc_op = CC_OP_STATIC;
503 /* calculates cc into cc_op */
504 static void gen_op_calc_cc(DisasContext *s)
506 TCGv_i32 local_cc_op;
507 TCGv_i64 dummy;
509 TCGV_UNUSED_I32(local_cc_op);
510 TCGV_UNUSED_I64(dummy);
511 switch (s->cc_op) {
512 default:
513 dummy = tcg_const_i64(0);
514 /* FALLTHRU */
515 case CC_OP_ADD_64:
516 case CC_OP_ADDU_64:
517 case CC_OP_ADDC_64:
518 case CC_OP_SUB_64:
519 case CC_OP_SUBU_64:
520 case CC_OP_SUBB_64:
521 case CC_OP_ADD_32:
522 case CC_OP_ADDU_32:
523 case CC_OP_ADDC_32:
524 case CC_OP_SUB_32:
525 case CC_OP_SUBU_32:
526 case CC_OP_SUBB_32:
527 local_cc_op = tcg_const_i32(s->cc_op);
528 break;
529 case CC_OP_CONST0:
530 case CC_OP_CONST1:
531 case CC_OP_CONST2:
532 case CC_OP_CONST3:
533 case CC_OP_STATIC:
534 case CC_OP_DYNAMIC:
535 break;
538 switch (s->cc_op) {
539 case CC_OP_CONST0:
540 case CC_OP_CONST1:
541 case CC_OP_CONST2:
542 case CC_OP_CONST3:
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
545 break;
546 case CC_OP_STATIC:
547 /* env->cc_op already is the cc value */
548 break;
549 case CC_OP_NZ:
550 case CC_OP_ABS_64:
551 case CC_OP_NABS_64:
552 case CC_OP_ABS_32:
553 case CC_OP_NABS_32:
554 case CC_OP_LTGT0_32:
555 case CC_OP_LTGT0_64:
556 case CC_OP_COMP_32:
557 case CC_OP_COMP_64:
558 case CC_OP_NZ_F32:
559 case CC_OP_NZ_F64:
560 case CC_OP_FLOGR:
561 /* 1 argument */
562 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
563 break;
564 case CC_OP_ICM:
565 case CC_OP_LTGT_32:
566 case CC_OP_LTGT_64:
567 case CC_OP_LTUGTU_32:
568 case CC_OP_LTUGTU_64:
569 case CC_OP_TM_32:
570 case CC_OP_TM_64:
571 case CC_OP_SLA_32:
572 case CC_OP_SLA_64:
573 case CC_OP_NZ_F128:
574 /* 2 arguments */
575 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
576 break;
577 case CC_OP_ADD_64:
578 case CC_OP_ADDU_64:
579 case CC_OP_ADDC_64:
580 case CC_OP_SUB_64:
581 case CC_OP_SUBU_64:
582 case CC_OP_SUBB_64:
583 case CC_OP_ADD_32:
584 case CC_OP_ADDU_32:
585 case CC_OP_ADDC_32:
586 case CC_OP_SUB_32:
587 case CC_OP_SUBU_32:
588 case CC_OP_SUBB_32:
589 /* 3 arguments */
590 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
591 break;
592 case CC_OP_DYNAMIC:
593 /* unknown operation - assume 3 arguments and cc_op in env */
594 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
595 break;
596 default:
597 tcg_abort();
600 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
601 tcg_temp_free_i32(local_cc_op);
603 if (!TCGV_IS_UNUSED_I64(dummy)) {
604 tcg_temp_free_i64(dummy);
607 /* We now have cc in cc_op as constant */
608 set_cc_static(s);
611 static bool use_exit_tb(DisasContext *s)
613 return (s->singlestep_enabled ||
614 (s->tb->cflags & CF_LAST_IO) ||
615 (s->tb->flags & FLAG_MASK_PER));
618 static bool use_goto_tb(DisasContext *s, uint64_t dest)
620 if (unlikely(use_exit_tb(s))) {
621 return false;
623 #ifndef CONFIG_USER_ONLY
624 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
625 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
626 #else
627 return true;
628 #endif
631 static void account_noninline_branch(DisasContext *s, int cc_op)
633 #ifdef DEBUG_INLINE_BRANCHES
634 inline_branch_miss[cc_op]++;
635 #endif
638 static void account_inline_branch(DisasContext *s, int cc_op)
640 #ifdef DEBUG_INLINE_BRANCHES
641 inline_branch_hit[cc_op]++;
642 #endif
645 /* Table of mask values to comparison codes, given a comparison as input.
646 For such, CC=3 should not be possible. */
647 static const TCGCond ltgt_cond[16] = {
648 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
649 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
650 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
651 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
652 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
653 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
654 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
655 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
658 /* Table of mask values to comparison codes, given a logic op as input.
659 For such, only CC=0 and CC=1 should be possible. */
660 static const TCGCond nz_cond[16] = {
661 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
662 TCG_COND_NEVER, TCG_COND_NEVER,
663 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
664 TCG_COND_NE, TCG_COND_NE,
665 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
666 TCG_COND_EQ, TCG_COND_EQ,
667 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
668 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
671 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
672 details required to generate a TCG comparison. */
673 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
675 TCGCond cond;
676 enum cc_op old_cc_op = s->cc_op;
678 if (mask == 15 || mask == 0) {
679 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
680 c->u.s32.a = cc_op;
681 c->u.s32.b = cc_op;
682 c->g1 = c->g2 = true;
683 c->is_64 = false;
684 return;
687 /* Find the TCG condition for the mask + cc op. */
688 switch (old_cc_op) {
689 case CC_OP_LTGT0_32:
690 case CC_OP_LTGT0_64:
691 case CC_OP_LTGT_32:
692 case CC_OP_LTGT_64:
693 cond = ltgt_cond[mask];
694 if (cond == TCG_COND_NEVER) {
695 goto do_dynamic;
697 account_inline_branch(s, old_cc_op);
698 break;
700 case CC_OP_LTUGTU_32:
701 case CC_OP_LTUGTU_64:
702 cond = tcg_unsigned_cond(ltgt_cond[mask]);
703 if (cond == TCG_COND_NEVER) {
704 goto do_dynamic;
706 account_inline_branch(s, old_cc_op);
707 break;
709 case CC_OP_NZ:
710 cond = nz_cond[mask];
711 if (cond == TCG_COND_NEVER) {
712 goto do_dynamic;
714 account_inline_branch(s, old_cc_op);
715 break;
717 case CC_OP_TM_32:
718 case CC_OP_TM_64:
719 switch (mask) {
720 case 8:
721 cond = TCG_COND_EQ;
722 break;
723 case 4 | 2 | 1:
724 cond = TCG_COND_NE;
725 break;
726 default:
727 goto do_dynamic;
729 account_inline_branch(s, old_cc_op);
730 break;
732 case CC_OP_ICM:
733 switch (mask) {
734 case 8:
735 cond = TCG_COND_EQ;
736 break;
737 case 4 | 2 | 1:
738 case 4 | 2:
739 cond = TCG_COND_NE;
740 break;
741 default:
742 goto do_dynamic;
744 account_inline_branch(s, old_cc_op);
745 break;
747 case CC_OP_FLOGR:
748 switch (mask & 0xa) {
749 case 8: /* src == 0 -> no one bit found */
750 cond = TCG_COND_EQ;
751 break;
752 case 2: /* src != 0 -> one bit found */
753 cond = TCG_COND_NE;
754 break;
755 default:
756 goto do_dynamic;
758 account_inline_branch(s, old_cc_op);
759 break;
761 case CC_OP_ADDU_32:
762 case CC_OP_ADDU_64:
763 switch (mask) {
764 case 8 | 2: /* vr == 0 */
765 cond = TCG_COND_EQ;
766 break;
767 case 4 | 1: /* vr != 0 */
768 cond = TCG_COND_NE;
769 break;
770 case 8 | 4: /* no carry -> vr >= src */
771 cond = TCG_COND_GEU;
772 break;
773 case 2 | 1: /* carry -> vr < src */
774 cond = TCG_COND_LTU;
775 break;
776 default:
777 goto do_dynamic;
779 account_inline_branch(s, old_cc_op);
780 break;
782 case CC_OP_SUBU_32:
783 case CC_OP_SUBU_64:
784 /* Note that CC=0 is impossible; treat it as dont-care. */
785 switch (mask & 7) {
786 case 2: /* zero -> op1 == op2 */
787 cond = TCG_COND_EQ;
788 break;
789 case 4 | 1: /* !zero -> op1 != op2 */
790 cond = TCG_COND_NE;
791 break;
792 case 4: /* borrow (!carry) -> op1 < op2 */
793 cond = TCG_COND_LTU;
794 break;
795 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
796 cond = TCG_COND_GEU;
797 break;
798 default:
799 goto do_dynamic;
801 account_inline_branch(s, old_cc_op);
802 break;
804 default:
805 do_dynamic:
806 /* Calculate cc value. */
807 gen_op_calc_cc(s);
808 /* FALLTHRU */
810 case CC_OP_STATIC:
811 /* Jump based on CC. We'll load up the real cond below;
812 the assignment here merely avoids a compiler warning. */
813 account_noninline_branch(s, old_cc_op);
814 old_cc_op = CC_OP_STATIC;
815 cond = TCG_COND_NEVER;
816 break;
819 /* Load up the arguments of the comparison. */
820 c->is_64 = true;
821 c->g1 = c->g2 = false;
822 switch (old_cc_op) {
823 case CC_OP_LTGT0_32:
824 c->is_64 = false;
825 c->u.s32.a = tcg_temp_new_i32();
826 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
827 c->u.s32.b = tcg_const_i32(0);
828 break;
829 case CC_OP_LTGT_32:
830 case CC_OP_LTUGTU_32:
831 case CC_OP_SUBU_32:
832 c->is_64 = false;
833 c->u.s32.a = tcg_temp_new_i32();
834 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
835 c->u.s32.b = tcg_temp_new_i32();
836 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
837 break;
839 case CC_OP_LTGT0_64:
840 case CC_OP_NZ:
841 case CC_OP_FLOGR:
842 c->u.s64.a = cc_dst;
843 c->u.s64.b = tcg_const_i64(0);
844 c->g1 = true;
845 break;
846 case CC_OP_LTGT_64:
847 case CC_OP_LTUGTU_64:
848 case CC_OP_SUBU_64:
849 c->u.s64.a = cc_src;
850 c->u.s64.b = cc_dst;
851 c->g1 = c->g2 = true;
852 break;
854 case CC_OP_TM_32:
855 case CC_OP_TM_64:
856 case CC_OP_ICM:
857 c->u.s64.a = tcg_temp_new_i64();
858 c->u.s64.b = tcg_const_i64(0);
859 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
860 break;
862 case CC_OP_ADDU_32:
863 c->is_64 = false;
864 c->u.s32.a = tcg_temp_new_i32();
865 c->u.s32.b = tcg_temp_new_i32();
866 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
867 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
868 tcg_gen_movi_i32(c->u.s32.b, 0);
869 } else {
870 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
872 break;
874 case CC_OP_ADDU_64:
875 c->u.s64.a = cc_vr;
876 c->g1 = true;
877 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
878 c->u.s64.b = tcg_const_i64(0);
879 } else {
880 c->u.s64.b = cc_src;
881 c->g2 = true;
883 break;
885 case CC_OP_STATIC:
886 c->is_64 = false;
887 c->u.s32.a = cc_op;
888 c->g1 = true;
889 switch (mask) {
890 case 0x8 | 0x4 | 0x2: /* cc != 3 */
891 cond = TCG_COND_NE;
892 c->u.s32.b = tcg_const_i32(3);
893 break;
894 case 0x8 | 0x4 | 0x1: /* cc != 2 */
895 cond = TCG_COND_NE;
896 c->u.s32.b = tcg_const_i32(2);
897 break;
898 case 0x8 | 0x2 | 0x1: /* cc != 1 */
899 cond = TCG_COND_NE;
900 c->u.s32.b = tcg_const_i32(1);
901 break;
902 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
903 cond = TCG_COND_EQ;
904 c->g1 = false;
905 c->u.s32.a = tcg_temp_new_i32();
906 c->u.s32.b = tcg_const_i32(0);
907 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
908 break;
909 case 0x8 | 0x4: /* cc < 2 */
910 cond = TCG_COND_LTU;
911 c->u.s32.b = tcg_const_i32(2);
912 break;
913 case 0x8: /* cc == 0 */
914 cond = TCG_COND_EQ;
915 c->u.s32.b = tcg_const_i32(0);
916 break;
917 case 0x4 | 0x2 | 0x1: /* cc != 0 */
918 cond = TCG_COND_NE;
919 c->u.s32.b = tcg_const_i32(0);
920 break;
921 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
922 cond = TCG_COND_NE;
923 c->g1 = false;
924 c->u.s32.a = tcg_temp_new_i32();
925 c->u.s32.b = tcg_const_i32(0);
926 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
927 break;
928 case 0x4: /* cc == 1 */
929 cond = TCG_COND_EQ;
930 c->u.s32.b = tcg_const_i32(1);
931 break;
932 case 0x2 | 0x1: /* cc > 1 */
933 cond = TCG_COND_GTU;
934 c->u.s32.b = tcg_const_i32(1);
935 break;
936 case 0x2: /* cc == 2 */
937 cond = TCG_COND_EQ;
938 c->u.s32.b = tcg_const_i32(2);
939 break;
940 case 0x1: /* cc == 3 */
941 cond = TCG_COND_EQ;
942 c->u.s32.b = tcg_const_i32(3);
943 break;
944 default:
945 /* CC is masked by something else: (8 >> cc) & mask. */
946 cond = TCG_COND_NE;
947 c->g1 = false;
948 c->u.s32.a = tcg_const_i32(8);
949 c->u.s32.b = tcg_const_i32(0);
950 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
951 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
952 break;
954 break;
956 default:
957 abort();
959 c->cond = cond;
962 static void free_compare(DisasCompare *c)
964 if (!c->g1) {
965 if (c->is_64) {
966 tcg_temp_free_i64(c->u.s64.a);
967 } else {
968 tcg_temp_free_i32(c->u.s32.a);
971 if (!c->g2) {
972 if (c->is_64) {
973 tcg_temp_free_i64(c->u.s64.b);
974 } else {
975 tcg_temp_free_i32(c->u.s32.b);
980 /* ====================================================================== */
981 /* Define the insn format enumeration. */
982 #define F0(N) FMT_##N,
983 #define F1(N, X1) F0(N)
984 #define F2(N, X1, X2) F0(N)
985 #define F3(N, X1, X2, X3) F0(N)
986 #define F4(N, X1, X2, X3, X4) F0(N)
987 #define F5(N, X1, X2, X3, X4, X5) F0(N)
989 typedef enum {
990 #include "insn-format.def"
991 } DisasFormat;
993 #undef F0
994 #undef F1
995 #undef F2
996 #undef F3
997 #undef F4
998 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact field indices: fields that never coexist share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1060 struct DisasFields {
1061 uint64_t raw_insn;
1062 unsigned op:8;
1063 unsigned op2:8;
1064 unsigned presentC:16;
1065 unsigned int presentO;
1066 int c[NUM_C_FIELD];
1069 /* This is the way fields are to be accessed out of DisasFields. */
1070 #define have_field(S, F) have_field1((S), FLD_O_##F)
1071 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1073 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1075 return (f->presentO >> c) & 1;
1078 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1079 enum DisasFieldIndexC c)
1081 assert(have_field1(f, o));
1082 return f->c[c];
1085 /* Describe the layout of each field in each format. */
1086 typedef struct DisasField {
1087 unsigned int beg:8;
1088 unsigned int size:8;
1089 unsigned int type:2;
1090 unsigned int indexC:6;
1091 enum DisasFieldIndexO indexO:8;
1092 } DisasField;
1094 typedef struct DisasFormatInfo {
1095 DisasField op[NUM_C_FIELD];
1096 } DisasFormatInfo;
1098 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1099 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1100 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1102 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1105 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1106 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1107 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1108 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1109 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1110 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1111 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1113 #define F0(N) { { } },
1114 #define F1(N, X1) { { X1 } },
1115 #define F2(N, X1, X2) { { X1, X2 } },
1116 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1117 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1118 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1120 static const DisasFormatInfo format_info[] = {
1121 #include "insn-format.def"
1124 #undef F0
1125 #undef F1
1126 #undef F2
1127 #undef F3
1128 #undef F4
1129 #undef F5
1130 #undef R
1131 #undef M
1132 #undef BD
1133 #undef BXD
1134 #undef BDL
1135 #undef BXDL
1136 #undef I
1137 #undef L
1139 /* Generally, we'll extract operands into this structures, operate upon
1140 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1141 of routines below for more details. */
1142 typedef struct {
1143 bool g_out, g_out2, g_in1, g_in2;
1144 TCGv_i64 out, out2, in1, in2;
1145 TCGv_i64 addr1;
1146 } DisasOps;
1148 /* Instructions can place constraints on their operands, raising specification
1149 exceptions if they are violated. To make this easy to automate, each "in1",
1150 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1151 of the following, or 0. To make this easy to document, we'll put the
1152 SPEC_<name> defines next to <name>. */
1154 #define SPEC_r1_even 1
1155 #define SPEC_r2_even 2
1156 #define SPEC_r3_even 4
1157 #define SPEC_r1_f128 8
1158 #define SPEC_r2_f128 16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* Architecture facility an instruction belongs to; used to gate decoding
   on the emulated CPU model.  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception sumilation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
    FAC_LPP,                /* load-program-parameter */
} DisasFacility;
1205 struct DisasInsn {
1206 unsigned opc:16;
1207 DisasFormat fmt:8;
1208 DisasFacility fac:8;
1209 unsigned spec:8;
1211 const char *name;
1213 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1214 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1215 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1216 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1217 void (*help_cout)(DisasContext *, DisasOps *);
1218 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1220 uint64_t data;
1223 /* ====================================================================== */
1224 /* Miscellaneous helpers, used by several operations. */
1226 static void help_l2_shift(DisasContext *s, DisasFields *f,
1227 DisasOps *o, int mask)
1229 int b2 = get_field(f, b2);
1230 int d2 = get_field(f, d2);
1232 if (b2 == 0) {
1233 o->in2 = tcg_const_i64(d2 & mask);
1234 } else {
1235 o->in2 = get_address(s, 0, b2, d2);
1236 tcg_gen_andi_i64(o->in2, o->in2, mask);
1240 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1242 if (dest == s->next_pc) {
1243 per_branch(s, true);
1244 return NO_EXIT;
1246 if (use_goto_tb(s, dest)) {
1247 update_cc_op(s);
1248 per_breaking_event(s);
1249 tcg_gen_goto_tb(0);
1250 tcg_gen_movi_i64(psw_addr, dest);
1251 tcg_gen_exit_tb((uintptr_t)s->tb);
1252 return EXIT_GOTO_TB;
1253 } else {
1254 tcg_gen_movi_i64(psw_addr, dest);
1255 per_branch(s, false);
1256 return EXIT_PC_UPDATED;
/* Emit a conditional branch.  C describes the comparison (and is freed
   on exit via free_compare); the target is either relative immediate IMM
   (is_imm) or the register/computed value CDEST.  Chooses between dual
   goto_tb exits, a single goto_tb fallthru, or a movcond + exit,
   depending on which destinations are reachable from this TB.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* 32-bit compare: widen the setcond result and select on it.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1391 /* ====================================================================== */
1392 /* The operations. These perform the bulk of the work for any insn,
1393 usually after the operands have been loaded and output initialized. */
/* LOAD POSITIVE (integer): out = in2 < 0 ? -in2 : in2.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
/* LOAD POSITIVE (extended BFP): clear sign in the high half, copy low.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* ADD (integer): out = in1 + in2; CC computed by the cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD LOGICAL WITH CARRY: out = in1 + in2 + carry-from-CC.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute in 32 bits, then widen the result.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* ADD (short BFP), via softfloat helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD (long BFP), via softfloat helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD (extended BFP): 128-bit result returned as high half in out,
   low half through the helper's retxl slot.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* AND (integer): out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND IMMEDIATE on one 16/32-bit field of the register (NIHH et al.).
   insn->data packs the field size in bits (high byte) and its bit
   offset (low byte); bits outside the field pass through unchanged.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Position the immediate and set all bits outside the field,
       so the AND only affects the selected field.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* BRANCH AND SAVE (BAS/BASR): save link info, branch to in2 if present.
   BASR with R2 = 0 saves the link but does not branch.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
/* BRANCH RELATIVE AND SAVE: save link info, branch to PC + 2*I2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* BRANCH ON CONDITION (BC/BCR/BRC), mask M1 selecting CC values.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low half of R1,
   branch if the 32-bit result is nonzero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH RELATIVE ON COUNT HIGH: decrement the high half of R1,
   branch if the 32-bit result is nonzero.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* BRANCH ON COUNT (64-bit): decrement R1, branch if nonzero.
   Compares the global register directly (g1 = true).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXH/BXLE): R1 += R3, compare against the
   limit in R3|1; insn->data selects LE (BXLE) vs GT (BXH).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): R1 += R3, compare against the
   limit in R3|1; insn->data selects LE vs GT.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If R1 is the limit register, snapshot the limit before the add
       below clobbers it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH (register/immediate forms): compare in1 with in2
   using the relation encoded in M3; insn->data selects unsigned.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        /* Indirect target: compute the branch address into o->out.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* COMPARE (short BFP): helper computes the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE (long BFP): helper computes the CC directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE (extended BFP): helper computes the CC directly.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32-bit <- short BFP), rounding mode in M3.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32-bit <- long BFP), rounding mode in M3.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32-bit <- extended BFP), rounding mode in M3.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64-bit <- short BFP), rounding mode in M3.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64-bit <- long BFP), rounding mode in M3.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64-bit <- extended BFP), rounding mode in M3.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (32-bit <- short BFP), rounding mode in M3.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (32-bit <- long BFP), rounding mode in M3.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (32-bit <- extended BFP), rounding mode in M3.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (64-bit <- short BFP), rounding mode in M3.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (64-bit <- long BFP), rounding mode in M3.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (64-bit <- extended BFP), rounding mode in M3.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (short BFP <- 64-bit), rounding mode in M3.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (long BFP <- 64-bit), rounding mode in M3.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (extended BFP <- 64-bit), rounding mode in M3.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (short BFP <- 64-bit), rounding mode in M3.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (long BFP <- 64-bit), rounding mode in M3.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (extended BFP <- 64-bit), rounding mode in M3.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM (CKSM): helper returns the number of bytes consumed; the
   R2/R2+1 address/length pair is advanced by that amount here.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (character): inline the power-of-two lengths as two
   loads plus an unsigned compare; fall back to the helper otherwise.
   Note L1 is the length minus one.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: let the helper do the byte loop and set CC.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED: entirely in the helper, which also
   updates the R1/R1+1 and R3/R3+1 pairs and produces the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper produces the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING: helper returns updated addresses (second one
   through the low-128 slot) and the CC; regs[0] holds the terminator.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1951 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1953 TCGv_i64 t = tcg_temp_new_i64();
1954 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1955 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1956 tcg_gen_or_i64(o->out, o->out, t);
1957 tcg_temp_free_i64(t);
1958 return NO_EXIT;
/* COMPARE AND SWAP (CS/CSG/CSY): atomic cmpxchg at B2+D2.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit cmpxchg done in the helper,
   which also sets the CC.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
2008 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; helper performs the swap,
   the TLB purge, and sets the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
2018 #endif
2020 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2022 TCGv_i64 t1 = tcg_temp_new_i64();
2023 TCGv_i32 t2 = tcg_temp_new_i32();
2024 tcg_gen_extrl_i64_i32(t2, o->in1);
2025 gen_helper_cvd(t1, t2);
2026 tcg_temp_free_i32(t2);
2027 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2028 tcg_temp_free_i64(t1);
2029 return NO_EXIT;
/* COMPARE AND TRAP: trap when the M3-selected relation holds between
   in1 and in2; insn->data selects the unsigned (logical) variant.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Invert the condition: branch *around* the trap when it fails.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
2051 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged; sync PSW and CC, then dispatch on the
   function code from the I2 field in the helper.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2069 #endif
/* DIVIDE (32-bit signed): quotient/remainder pair via helper.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE LOGICAL (32-bit unsigned): quotient/remainder via helper.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE SINGLE (64-bit signed): quotient/remainder via helper.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE LOGICAL (64-bit unsigned, 128-bit dividend) via helper.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE (short BFP), via softfloat helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* DIVIDE (long BFP), via softfloat helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* DIVIDE (extended BFP): 128-bit result, low half via retxl slot.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS (EAR): read access register R2 into out.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
/* EXTRACT CPU ATTRIBUTE (ECAG).  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
/* EXTRACT FPC: load the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: high PSW word into R1, low word into R2 (if nonzero).  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: sync PSW and CC, then run the target insn in the helper.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
/* LOAD FP INTEGER (short BFP): round to integer, mode in M3.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (long BFP): round to integer, mode in M3.  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (extended BFP): round to integer, mode in M3.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE (FLOGR): R1 = bit number of leftmost one (64 if
   none), R1+1 = input with that bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2226 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2228 int m3 = get_field(s->fields, m3);
2229 int pos, len, base = s->insn->data;
2230 TCGv_i64 tmp = tcg_temp_new_i64();
2231 uint64_t ccm;
2233 switch (m3) {
2234 case 0xf:
2235 /* Effectively a 32-bit load. */
2236 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2237 len = 32;
2238 goto one_insert;
2240 case 0xc:
2241 case 0x6:
2242 case 0x3:
2243 /* Effectively a 16-bit load. */
2244 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2245 len = 16;
2246 goto one_insert;
2248 case 0x8:
2249 case 0x4:
2250 case 0x2:
2251 case 0x1:
2252 /* Effectively an 8-bit load. */
2253 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2254 len = 8;
2255 goto one_insert;
2257 one_insert:
2258 pos = base + ctz32(m3) * 8;
2259 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2260 ccm = ((1ull << len) - 1) << pos;
2261 break;
2263 default:
2264 /* This is going to be a sequence of loads and inserts. */
2265 pos = base + 32 - 8;
2266 ccm = 0;
2267 while (m3) {
2268 if (m3 & 0x8) {
2269 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2270 tcg_gen_addi_i64(o->in2, o->in2, 1);
2271 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2272 ccm |= 0xff << pos;
2274 m3 = (m3 << 1) & 0xf;
2275 pos -= 8;
2277 break;
2280 tcg_gen_movi_i64(tmp, ccm);
2281 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2282 tcg_temp_free_i64(tmp);
2283 return NO_EXIT;
/* INSERT IMMEDIATE (IIHH et al.): deposit in2 into the 16/32-bit field
   of in1 described by insn->data (size << 8 | shift).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK (IPM): build bits 32-39 of R1 from the program
   mask (PSW bits) and the condition code; bits 40-63 unchanged.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Program mask from psw_mask into bits 28-31 of the low word.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Condition code into bits 28-29 above the program mask.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2313 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY: privileged; done in the helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* INSERT STORAGE KEY EXTENDED: privileged; key fetched by helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2327 #endif
/* LOAD AND ADD: atomic fetch-add; out is the sum for CC purposes.  */
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND AND: atomic fetch-and; out is the AND result for CC.  */
static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND OR: atomic fetch-or; out is the OR result for CC.  */
static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND EXCLUSIVE OR: atomic fetch-xor; out is the XOR result for CC.  */
static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD LENGTHENED (short BFP -> long BFP), via softfloat helper.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* LOAD ROUNDED (long BFP -> short BFP), via softfloat helper.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* LOAD ROUNDED (extended BFP -> long BFP), via softfloat helper.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD ROUNDED (extended BFP -> short BFP), via softfloat helper.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD LENGTHENED (long BFP -> extended BFP): low half via retxl.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LENGTHENED (short BFP -> extended BFP): low half via retxl.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS: keep the low 31 bits only.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
/* Load a sign-extended 8-bit value from the address in in2.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Load a zero-extended 8-bit value from the address in in2.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Load a sign-extended 16-bit value from the address in in2.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Load a zero-extended 16-bit value from the address in in2.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
2441 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2443 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2444 return NO_EXIT;
2447 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2449 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2450 return NO_EXIT;
2453 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2455 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2456 return NO_EXIT;
/* Load-and-trap family: store/produce the value, then trap if it is zero.
   All variants branch around gen_trap() when the value is non-zero.  */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* As op_lat, but targeting the high half of the 64-bit register.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* 31-bit variant: mask before the zero test.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2514 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2516 DisasCompare c;
2518 disas_jcc(s, &c, get_field(s->fields, m3));
2520 if (c.is_64) {
2521 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2522 o->in2, o->in1);
2523 free_compare(&c);
2524 } else {
2525 TCGv_i32 t32 = tcg_temp_new_i32();
2526 TCGv_i64 t, z;
2528 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2529 free_compare(&c);
2531 t = tcg_temp_new_i64();
2532 tcg_gen_extu_i32_i64(t, t32);
2533 tcg_temp_free_i32(t32);
2535 z = tcg_const_i64(0);
2536 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2537 tcg_temp_free_i64(t);
2538 tcg_temp_free_i64(z);
2541 return NO_EXIT;
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; the helper loads regs r1..r3.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit variant).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS; helper sets CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PROGRAM PARAMETER: store the operand into env->pp.  */
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}

/* LOAD PSW (short format): read two 32-bit words from in2.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* Loading a new PSW never falls through to the next insn.  */
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED: read two 64-bit words (mask, addr) from in2.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3 from in2.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive words at in2.  The first and last words are loaded before
   any register is written, so a page fault leaves the registers intact.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);   /* t2 now holds the constant stride */
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE HIGH: same structure as op_lm32, but each 32-bit word is
   stored into the high half of the target register.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);   /* t2 now holds the constant stride */
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE (64-bit): like op_lm32 but with 8-byte elements, writing
   whole registers directly.  First and last elements are loaded before any
   register is modified, to get page faults out of the way.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);   /* t1 now holds the constant stride */
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD PAIR DISJOINT: two aligned loads that must appear interlocked.
   Serial execution performs them back-to-back; parallel execution falls
   back to the stop-the-world EXCP_ATOMIC path.  */
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (parallel_cpus) {
        potential_page_fault(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit); privileged, helper-based.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit).  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* Move: steal in2 as the output to avoid a copy; mark in2 unused so the
   generic cleanup code does not free it twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Move, additionally updating access register 1 according to the
   current addressing-space control in the PSW.  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            /* Copy access register b2 into ar1.  */
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}

/* Move a register pair: steal both inputs as the outputs.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE (character): byte copy of l1+1 bytes, done in the helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: register-pair form; helper updates registers and CC.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-space move; note l1 holds the
   register number carrying the length for this format.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: mirror of op_mvcp.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
/* MOVE PAGE: the helper uses TCG unwind data to recover the guest state
   on a fault, so no potential_page_fault() call is needed here.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: r0 supplies the terminator; updated addresses come back
   through out (in1) and the low128 slot (in2).  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* Integer multiply, low 64 bits.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Unsigned 64x64 -> 128-bit multiply; high half in out, low in out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiplies of various widths, all done in helpers.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit multiply: both halves of each operand are passed; the low half
   of the result is retrieved via the helper's low128 slot.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Fused multiply-add/subtract: r3 supplies the third operand.  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
3012 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3014 TCGv_i64 z, n;
3015 z = tcg_const_i64(0);
3016 n = tcg_temp_new_i64();
3017 tcg_gen_neg_i64(n, o->in2);
3018 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3019 tcg_temp_free_i64(n);
3020 tcg_temp_free_i64(z);
3021 return NO_EXIT;
/* Float negative-absolute: force the sign bit on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* 128-bit: sign lives in the high half (in1); low half is copied.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* AND (character): storage-to-storage AND via helper; CC from helper.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* Two's-complement negation.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Float negation: flip the sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* OR (character): storage-to-storage OR via helper; CC from helper.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3092 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3094 int shift = s->insn->data & 0xff;
3095 int size = s->insn->data >> 8;
3096 uint64_t mask = ((1ull << size) - 1) << shift;
3098 assert(!o->g_in2);
3099 tcg_gen_shli_i64(o->in2, o->in2, shift);
3100 tcg_gen_or_i64(o->out, o->in1, o->in2);
3102 /* Produce the CC from only the bits manipulated. */
3103 tcg_gen_andi_i64(cc_dst, o->out, mask);
3104 set_cc_nz_u64(s, cc_dst);
3105 return NO_EXIT;
/* POPULATION COUNT, done in the helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; helper flushes the softmmu TLB.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
#endif
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate in2 by
   i5 and insert bits i3..i4 of the result into in1/out.  Computes the
   insert mask at translation time and lowers to extract/deposit/and-or
   depending on the mask shape.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;    /* Z bit: zero the untouched bits of R1 */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
        tcg_gen_extract_i64(o->out, o->in2, rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;    /* deposit not possible; fall back to and/or below */
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate in2
   by i5 and combine bits i3..i4 with R1; op2 selects the boolean op.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside the mask are made neutral for the chosen op
       (1 for AND, 0 for OR/XOR) so only the selected bits change.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Byte-reverse loads/moves of various widths.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
3287 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3289 TCGv_i32 t1 = tcg_temp_new_i32();
3290 TCGv_i32 t2 = tcg_temp_new_i32();
3291 TCGv_i32 to = tcg_temp_new_i32();
3292 tcg_gen_extrl_i64_i32(t1, o->in1);
3293 tcg_gen_extrl_i64_i32(t2, o->in2);
3294 tcg_gen_rotl_i32(to, t1, t2);
3295 tcg_gen_extu_i32_i64(o->out, to);
3296 tcg_temp_free_i32(t1);
3297 tcg_temp_free_i32(t2);
3298 tcg_temp_free_i32(to);
3299 return NO_EXIT;
/* ROTATE LEFT (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; CC from helper.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
#endif
/* SET ADDRESSING MODE: insn->data selects 24/31/64-bit mode; updates
   the addressing-mode bits of the PSW mask.  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;         /* 24-bit addresses */
        break;
    case 1:
        mask = 0x7fffffff;       /* 31-bit addresses */
        break;
    default:
        mask = -1;               /* 64-bit: no restriction */
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
/* SET ACCESS REGISTER: store the low word of in2 into aregs[r1].  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* BFP subtract, helper-based.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit subtract; low half of result via the low128 slot.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP square root, helper-based.  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SERVICE CALL: privileged SCLP interface; CC from helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged; CC from helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* STORE ON CONDITION: store regs[r1] (32 or 64 bits per insn->data)
   only when the m3 condition holds.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63), selecting the matching CC computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* Logical shifts.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET FPC / SET FPC AND SIGNAL, helper-based.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): deposit the rounding-mode bits
   into the FPC, then reinstall the FPC via the helper.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Each variant targets a different bit field of the FPC.  */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        /* No base register: the field value is the displacement itself.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: move the key bits into the PSW mask.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED, helper-based.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: replace the top byte of the PSW mask.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
/* STORE CLOCK: helper returns the TOD value.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: write the clock as a 16-byte value.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR, privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR, privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): helper stores control regs r1..r3.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit).  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CPU ID: result is cpu_num in the low word with machine_type
   deposited into bits 32..63.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    /* Merge machine_type into the high word of the result.  */
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}

/* SET CPU TIMER.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST: fully delegated to the helper.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STORE CPU TIMER.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE SYSTEM INFORMATION: function code and selectors come from
   general registers 0 and 1; the helper sets the condition code.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* I/O subsystem instructions.  All follow the same shape: privileged,
   may fault inside the helper, and the helper computes the condition
   code.  Most take the subchannel designation in general register 1;
   MSCH/SSCH/STSCH/TSCH additionally pass the operand address, and
   CHSC passes only the operand address.  */

/* CANCEL SUBCHANNEL.  */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL.  */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL.  */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL.  */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH.  */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL.  */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* START SUBCHANNEL.  */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL.  */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL.  */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL.  */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX: read the prefix register from env, masked to the
   architected prefix bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN {AND,OR} SYSTEM MASK.  Opcode 0xac is STNSM (AND the
   immediate into the mask); otherwise STOSM (OR it in).  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    /* The system mask is the top byte of the PSW mask.  */
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (32-bit): address in in2, value in in1.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit).  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
#endif
/* STORE FACILITY LIST EXTENDED: non-privileged; the helper sets cc.  */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Plain stores of 1, 2, 4 and 8 bytes: value in in1, address in in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: access registers r1..r3 to memory at in2.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK.  insn->data is the bit position of the
   most significant byte selectable by the mask (distinguishes the
   low/high-word variants).  Contiguous masks become a single store;
   anything else falls back to a byte-by-byte sequence.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the least significant selected byte within in1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                /* Store the selected byte, then bump the address.  */
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) at in2.
   insn->data selects the element size (4 or 8 bytes).  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        /* Advance the address and wrap to the next register.  */
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3952 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3954 int r1 = get_field(s->fields, r1);
3955 int r3 = get_field(s->fields, r3);
3956 TCGv_i64 t = tcg_temp_new_i64();
3957 TCGv_i64 t4 = tcg_const_i64(4);
3958 TCGv_i64 t32 = tcg_const_i64(32);
3960 while (1) {
3961 tcg_gen_shl_i64(t, regs[r1], t32);
3962 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3963 if (r1 == r3) {
3964 break;
3966 tcg_gen_add_i64(o->in2, o->in2, t4);
3967 r1 = (r1 + 1) & 15;
3970 tcg_temp_free_i64(t);
3971 tcg_temp_free_i64(t4);
3972 tcg_temp_free_i64(t32);
3973 return NO_EXIT;
/* SEARCH STRING: the helper returns the updated first address in in1
   and the updated second address via the low-128 mechanism; general
   register 0 supplies the termination character.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* SUBTRACT.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   derived from the current condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in 32 bits, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the svc code and instruction length in env
   for the interrupt delivery code, then raise the exception.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    /* Low byte of the i1 field is the svc interruption code.  */
    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* Instruction length, needed to compute the return address.  */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
/* TEST DATA CLASS (short BFP).  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP).  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): the 128-bit input arrives in
   out/out2 rather than in1/in2 (see in1_x1_o).  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY

/* TEST BLOCK: privileged; the helper does the work and sets the cc.  */
static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST PROTECTION: addr1 is the tested address, in2 the access key.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

#endif
/* TRANSLATE: length l1, destination addr1, table address in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED: the helper returns a register pair via out and
   the low-128 mechanism.  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST.  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* EXCLUSIVE OR (character): XC with identical operands is the idiomatic
   way to clear memory, so that case is inlined; everything else goes
   through the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is the length minus one; emit stores from largest to
           smallest until the remaining length is consumed.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* The result of a clear is always zero, hence CC 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR (register/register-memory forms).  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE: insn->data encodes (size << 8) | shift,
   describing which bit field of the register the immediate hits.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Produce a constant zero output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a constant zero output pair; out2 aliases out, so mark it
   as a "global" to prevent a double free.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  Each simply records the
   appropriate CC_OP_* and the operands it needs for lazy evaluation.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits participate; zero-extend into cc_dst.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  */

/* Fresh temporary for a single output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Fresh temporaries for an output pair.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly to general register r1 (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly to the even/odd register pair r1:r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly to floating-point register r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly to the 128-bit FP register pair r1:r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

/* Full 64-bit result to general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Low 8 bits of the result into r1, other bits preserved.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Low 16 bits of the result into r1, other bits preserved.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Low 32 bits of the result into the low word of r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Low 32 bits of the result into the high word of r1.  */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* 32-bit pair to the even/odd register pair r1:r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* 64-bit result split across the even/odd pair: high word to r1,
   low word to r1+1.  Note this clobbers o->out.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* 32-bit pair to the even/odd register pair r3:r3+1.  */
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* 64-bit pair to the even/odd register pair r3:r3+1.  */
static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Short FP result to register f1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Long FP result to register f1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4499 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4501 int f1 = get_field(s->fields, r1);
4502 store_freg(f1, o->out);
4503 store_freg(f1 + 2, o->out2);
4505 #define SPEC_wout_x1 SPEC_r1_f128
/* Store only when r1 and r2 differ (r1 == r2 means a no-op move).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* As above, for short FP registers.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Stores of 1, 2, 4 and 8 bytes to the first-operand address.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* 32-bit store to the second-operand address (held in in2).  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Copy the (possibly helper-updated) in2 back to register r1.  */
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.
   "_o" variants alias a TCG global directly (and set g_in1 so it is
   not freed); 32s/32u/sr32 variants extract or extend a word.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High word of r1, shifted down into the low 32 bits.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the r1 even/odd pair.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value assembled from the low words of the r1 pair:
   r1 supplies the high half, r1+1 the low half.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit FP first operand.  Note: deliberately populates out/out2
   rather than in1/in2 -- helpers taking a 128-bit input, e.g.
   gen_helper_tcxb in op_tcxb, consume it from there.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Effective address of the first operand (b1 + d1).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Effective address of the second operand (x2 + b2 + d2), placed in
   addr1 for insns that treat it as their first operand.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory loads through the first-operand address, various sizes.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4768 /* ====================================================================== */
4769 /* The "INput 2" generators. These load the second operand to an insn. */
4771 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4773 o->in2 = regs[get_field(f, r1)];
4774 o->g_in2 = true;
4776 #define SPEC_in2_r1_o 0
4778 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4780 o->in2 = tcg_temp_new_i64();
4781 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4783 #define SPEC_in2_r1_16u 0
4785 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4787 o->in2 = tcg_temp_new_i64();
4788 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4790 #define SPEC_in2_r1_32u 0
4792 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4794 int r1 = get_field(f, r1);
4795 o->in2 = tcg_temp_new_i64();
4796 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4798 #define SPEC_in2_r1_D32 SPEC_r1_even
4800 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4802 o->in2 = load_reg(get_field(f, r2));
4804 #define SPEC_in2_r2 0
4806 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4808 o->in2 = regs[get_field(f, r2)];
4809 o->g_in2 = true;
4811 #define SPEC_in2_r2_o 0
4813 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4815 int r2 = get_field(f, r2);
4816 if (r2 != 0) {
4817 o->in2 = load_reg(r2);
4820 #define SPEC_in2_r2_nz 0
4822 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4824 o->in2 = tcg_temp_new_i64();
4825 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4827 #define SPEC_in2_r2_8s 0
4829 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4831 o->in2 = tcg_temp_new_i64();
4832 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4834 #define SPEC_in2_r2_8u 0
4836 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4838 o->in2 = tcg_temp_new_i64();
4839 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4841 #define SPEC_in2_r2_16s 0
4843 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4845 o->in2 = tcg_temp_new_i64();
4846 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4848 #define SPEC_in2_r2_16u 0
4850 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4852 o->in2 = load_reg(get_field(f, r3));
4854 #define SPEC_in2_r3 0
4856 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4858 o->in2 = tcg_temp_new_i64();
4859 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4861 #define SPEC_in2_r3_sr32 0
4863 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4865 o->in2 = tcg_temp_new_i64();
4866 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4868 #define SPEC_in2_r2_32s 0
4870 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4872 o->in2 = tcg_temp_new_i64();
4873 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4875 #define SPEC_in2_r2_32u 0
4877 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4879 o->in2 = tcg_temp_new_i64();
4880 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4882 #define SPEC_in2_r2_sr32 0
/* Load in2 with the short (32-bit) float in register f(r2).  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Alias in2 to the long (64-bit) float register f(r2) in place.
   g_in2 marks the value as a global so it is not freed after use.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* Alias in1/in2 to the extended (128-bit) float register pair f(r2),
   f(r2+2) in place; both halves are globals and must not be freed.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
/* The f128 check enforces that r2 names a valid register pair.  */
#define SPEC_in2_x2_o SPEC_r2_f128
/* Load in2 with the address contained in register r2 (no index/disp).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Load in2 with the effective address x2 + b2 + d2; x2 is optional
   since not all formats carry an index field.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* Load in2 with the PC-relative address pc + i2 * 2 (i2 counts
   halfwords, per the z/Architecture relative-addressing convention).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0
/* Load in2 with a shift count masked for 32-bit shifts (0..31).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* Load in2 with a shift count masked for 64-bit shifts (0..63).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
/* The in2_m2_* helpers compute the effective address (via in2_a2) and
   then load a value of the given width/signedness from guest memory,
   reusing o->in2 as both the address and the result.  */

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0
/* The in2_mri2_* helpers are like in2_m2_* but use a PC-relative
   address (via in2_ri2) instead of a base+index+displacement address.  */

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
/* Load in2 with the i2 immediate, sign-extended as decoded.  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Load in2 with the low 8 bits of i2, zero-extended.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

/* Load in2 with the low 16 bits of i2, zero-extended.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

/* Load in2 with the low 32 bits of i2, zero-extended.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Load in2 with the 16-bit i2 immediate shifted left by the per-insn
   data amount (used for the insert-immediate family).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* Load in2 with the 32-bit i2 immediate shifted left by the per-insn
   data amount.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
#ifndef CONFIG_USER_ONLY
/* Load in2 with the raw left-aligned instruction image itself; used by
   system-mode helpers that need to re-inspect the insn (softmmu only).  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C() is the common case: a D() entry with .data == 0.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion: generate one enumerator per insn for table indexing.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: generate the DisasInsn descriptor for each insn,
   wiring up the operand-load/store helper functions by name.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: generate the opcode -> table-entry switch cases.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
5118 /* Extract a field from the insn. The INSN should be left-aligned in
5119 the uint64_t so that we can more easily utilize the big-bit-endian
5120 definitions we extract from the Principals of Operation. */
5122 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5124 uint32_t r, m;
5126 if (f->size == 0) {
5127 return;
5130 /* Zero extract the field from the insn. */
5131 r = (insn << f->beg) >> (64 - f->size);
5133 /* Sign-extend, or un-swap the field as necessary. */
5134 switch (f->type) {
5135 case 0: /* unsigned */
5136 break;
5137 case 1: /* signed */
5138 assert(f->size <= 32);
5139 m = 1u << (f->size - 1);
5140 r = (r ^ m) - m;
5141 break;
5142 case 2: /* dl+dh split, signed 20 bit. */
5143 r = ((int8_t)r << 12) | (r >> 8);
5144 break;
5145 default:
5146 abort();
5149 /* Validate that the "compressed" encoding we selected above is valid.
5150 I.e. we havn't make two different original fields overlap. */
5151 assert(((o->presentC >> f->indexC) & 1) == 0);
5152 o->presentC |= 1 << f->indexC;
5153 o->presentO |= 1 << f->indexO;
5155 o->c[f->indexC] = r;
5158 /* Lookup the insn at the current PC, extracting the operands into O and
5159 returning the info struct for the insn. Returns NULL for invalid insn. */
/* Lookup the insn at the current PC, extracting the operands into F and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first two bytes always suffice to determine the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the full insn image in the 64-bit word, matching the
       big-bit-endian field definitions used by extract_field.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the 4-bit nibble after the r1 field.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Decode and translate a single guest instruction at s->pc, emitting TCG
   ops via the table-driven in/prep/op/wout/cout helper pipeline.
   Returns the resulting ExitStatus and advances s->pc to the next insn.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* PER instruction-fetch event tracing (system mode only).  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Even-register checks for insns operating on register pairs.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* Extended (128-bit) float operands must name a valid pair
           (0,2), (1,3), ... (13,15) -- i.e. register number <= 13.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional per insn.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers; globals (g_* set)
       alias CPU state and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
5381 void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
5383 S390CPU *cpu = s390_env_get_cpu(env);
5384 CPUState *cs = CPU(cpu);
5385 DisasContext dc;
5386 target_ulong pc_start;
5387 uint64_t next_page_start;
5388 int num_insns, max_insns;
5389 ExitStatus status;
5390 bool do_debug;
5392 pc_start = tb->pc;
5394 /* 31-bit mode */
5395 if (!(tb->flags & FLAG_MASK_64)) {
5396 pc_start &= 0x7fffffff;
5399 dc.tb = tb;
5400 dc.pc = pc_start;
5401 dc.cc_op = CC_OP_DYNAMIC;
5402 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5404 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5406 num_insns = 0;
5407 max_insns = tb->cflags & CF_COUNT_MASK;
5408 if (max_insns == 0) {
5409 max_insns = CF_COUNT_MASK;
5411 if (max_insns > TCG_MAX_INSNS) {
5412 max_insns = TCG_MAX_INSNS;
5415 gen_tb_start(tb);
5417 do {
5418 tcg_gen_insn_start(dc.pc, dc.cc_op);
5419 num_insns++;
5421 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5422 status = EXIT_PC_STALE;
5423 do_debug = true;
5424 /* The address covered by the breakpoint must be included in
5425 [tb->pc, tb->pc + tb->size) in order to for it to be
5426 properly cleared -- thus we increment the PC here so that
5427 the logic setting tb->size below does the right thing. */
5428 dc.pc += 2;
5429 break;
5432 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5433 gen_io_start();
5436 status = NO_EXIT;
5437 if (status == NO_EXIT) {
5438 status = translate_one(env, &dc);
5441 /* If we reach a page boundary, are single stepping,
5442 or exhaust instruction count, stop generation. */
5443 if (status == NO_EXIT
5444 && (dc.pc >= next_page_start
5445 || tcg_op_buf_full()
5446 || num_insns >= max_insns
5447 || singlestep
5448 || cs->singlestep_enabled)) {
5449 status = EXIT_PC_STALE;
5451 } while (status == NO_EXIT);
5453 if (tb->cflags & CF_LAST_IO) {
5454 gen_io_end();
5457 switch (status) {
5458 case EXIT_GOTO_TB:
5459 case EXIT_NORETURN:
5460 break;
5461 case EXIT_PC_STALE:
5462 update_psw_addr(&dc);
5463 /* FALLTHRU */
5464 case EXIT_PC_UPDATED:
5465 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5466 cc op type is in env */
5467 update_cc_op(&dc);
5468 /* Exit the TB, either by raising a debug exception or by return. */
5469 if (do_debug) {
5470 gen_exception(EXCP_DEBUG);
5471 } else if (use_exit_tb(&dc)) {
5472 tcg_gen_exit_tb(0);
5473 } else {
5474 tcg_gen_lookup_and_goto_ptr(psw_addr);
5476 break;
5477 default:
5478 abort();
5481 gen_tb_end(tb, num_insns);
5483 tb->size = dc.pc - pc_start;
5484 tb->icount = num_insns;
5486 #if defined(S390X_DEBUG_DISAS)
5487 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5488 && qemu_log_in_addr_range(pc_start)) {
5489 qemu_log_lock();
5490 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5491 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5492 qemu_log("\n");
5493 qemu_log_unlock();
5495 #endif
5498 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5499 target_ulong *data)
5501 int cc_op = data[1];
5502 env->psw.addr = data[0];
5503 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5504 env->cc_op = cc_op;