target/s390x: Use unwind data for helper_mvcle
[qemu.git] / target / s390x / translate.c
blobeaa3adcc784015db32c3c7123968612dd2848b7c
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
48 #include "exec/log.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    const DisasInsn *insn;         /* decode-table entry for the current insn */
    DisasFields *fields;           /* decoded operand fields of the insn */
    uint64_t pc, next_pc;          /* address of current and next insn */
    enum cc_op cc_op;              /* lazily-tracked condition-code state;
                                      flushed by update_cc_op/gen_op_calc_cc */
    bool singlestep_enabled;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;   /* TCG comparison to apply between a and b */
    bool is_64;       /* true: operands are in u.s64; false: in u.s32 */
    bool g1;          /* a operand is a global temp; free_compare must not free it */
    bool g2;          /* likewise for the b operand */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
77 #define DISAS_EXCP 4
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit[CC_OP_MAX];
81 static uint64_t inline_branch_miss[CC_OP_MAX];
82 #endif
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
91 return pc;
/* Dump architected CPU state (PSW, general, floating-point and vector
   registers, plus control registers under softmmu) to F via CPU_FPRINTF.  */
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        /* cc_op > 3 is a deferred computation kind, not a cc value:
           print its symbolic name instead.  */
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    /* General registers, four per line.  */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    /* Floating-point registers, four per line.  */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    /* Vector registers (128 bits each), two per line.  */
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
                    env->vregs[i][0].ll, env->vregs[i][1].ll);
        cpu_fprintf(f, (i % 2) ? "\n" : " ");
    }

#ifndef CONFIG_USER_ONLY
    /* Control registers only exist for the full-system emulation.  */
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}
154 static TCGv_i64 psw_addr;
155 static TCGv_i64 psw_mask;
156 static TCGv_i64 gbea;
158 static TCGv_i32 cc_op;
159 static TCGv_i64 cc_src;
160 static TCGv_i64 cc_dst;
161 static TCGv_i64 cc_vr;
163 static char cpu_reg_names[32][4];
164 static TCGv_i64 regs[16];
165 static TCGv_i64 fregs[16];
/* Create the TCG globals that mirror CPUS390XState fields: the PSW,
   the breaking-event address, the lazy cc state, and the 16 general
   and 16 floating-point registers.  Called once at startup.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        /* FP register i aliases the first doubleword of vector register i.  */
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
/* Copy general register REG into a fresh temporary.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return the high 32 bits of FP register REG (where short-format FP
   values live) shifted down into a fresh 64-bit temporary.  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

/* Store V into general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into FP register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the high half of general register REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store a short-format FP value into the high half of FP register REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

/* Fetch into DEST the low 64 bits of a 128-bit helper result, which
   helpers return through env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
/* Flush the translation-time PC back to env->psw.addr.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

/* Record an unconditional branch for PER: update the breaking-event
   address, and when PER is active invoke the per_branch helper.  With
   TO_NEXT the branch target is the next sequential instruction,
   otherwise it is whatever psw_addr currently holds.  */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

/* As per_branch, but for a branch that is taken only when
   COND(ARG1, ARG2) holds.  */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the helper call when the branch is not taken.  */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* PER inactive: only keep the breaking-event address current.  */
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

/* Record the current instruction as a PER breaking-event address.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

/* Flush a compile-time-constant cc_op out to env->cc_op.  DYNAMIC and
   STATIC states are already materialized in memory, so nothing to do.  */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Make psw.addr and the cc state consistent before an operation that
   may fault, so the exception path observes correct values.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
/* Fetch two bytes of instruction text at PC.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch four bytes of instruction text at PC, zero-extended.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

/* Map the PSW address-space-control bits (cached in tb->flags) to
   the MMU index used for data accesses.  */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        tcg_abort();
        break;
    }
}
/* Raise exception EXCP via the exception helper, which longjmps out
   of the generated code.  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise a program exception with interruption code CODE, recording
   the code and instruction length for the interrupt handler.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* ...and the length of the faulting instruction.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Advance past instruction.  */
    s->pc = s->next_pc;
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

/* Raise an operation exception (illegal/unimplemented opcode).  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with DXC 0xff recorded in the FPC.  */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}

#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception when in problem state.  */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
/* Compute the effective address regs[b2] + regs[x2] + d2 into a new
   temporary, truncated to 31 bits unless in 64-bit addressing mode.
   A register number of 0 means "no register" per the architecture.  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        /* No registers involved: the mask can be folded at compile time.  */
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
/* True if cc_src/cc_dst/cc_vr currently carry data that describes the
   pending cc computation.  The constant states (<= 3) and the
   STATIC/DYNAMIC states keep nothing in those globals.  */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

/* Set the cc to the compile-time constant VAL (0..3), discarding any
   now-dead data in the cc globals so TCG can optimize it away.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer a one-operand cc computation OP over DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a two-operand cc computation OP over SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a three-operand cc computation OP over SRC, DST and VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Convenience wrappers for the common nonzero-test cc computations.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* Materialize the deferred cc computation: calculates cc into cc_op
   (the TCG global mirroring env->cc_op) and switches to CC_OP_STATIC.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First pass: allocate the helper arguments this cc_op needs --
       a constant op number for the real computations, and a dummy
       zero to pad calls that take fewer than three data operands.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper arguments needed for these.  */
        break;
    }

    /* Second pass: emit the computation itself.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
/* True if the TB must end with a full exit_tb (so the main loop can
   react) instead of chaining directly to another TB.  */
static bool use_exit_tb(DisasContext *s)
{
    return (s->singlestep_enabled ||
            (s->tb->cflags & CF_LAST_IO) ||
            (s->tb->flags & FLAG_MASK_PER));
}

/* True if a direct goto_tb to DEST is permissible.  */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* Softmmu: only chain within the page of the TB start or of the
       current instruction.  */
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

/* Statistics hooks, active only with DEBUG_INLINE_BRANCHES.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  The index is the 4-bit branch
   mask; the low bit (the CC=3 column) is ignored, hence the pairs.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    | x | x */
    TCG_COND_NEVER,  TCG_COND_NEVER,
    TCG_COND_NE,     TCG_COND_NE,        /*    | NE | x | x */
    TCG_COND_NE,     TCG_COND_NE,
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    | x | x */
    TCG_COND_EQ,     TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  When the pending
   cc computation can be folded into a single comparison the branch is
   "inlined"; otherwise the cc is materialized and tested directly.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        /* Branch always or never: no operands needed.  */
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Test under mask: compare (src & dst) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value itself is in cc_op; derive a comparison against
           it that is equivalent to testing (8 >> cc) & mask.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
/* Release the temporaries held in C.  Operands flagged as globals
   (g1/g2, set by disas_jcc) must not be freed.  */
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
980 /* ====================================================================== */
981 /* Define the insn format enumeration. */
982 #define F0(N) FMT_##N,
983 #define F1(N, X1) F0(N)
984 #define F2(N, X1, X2) F0(N)
985 #define F3(N, X1, X2, X3) F0(N)
986 #define F4(N, X1, X2, X3, X4) F0(N)
987 #define F5(N, X1, X2, X3, X4, X5) F0(N)
989 typedef enum {
990 #include "insn-format.def"
991 } DisasFormat;
993 #undef F0
994 #undef F1
995 #undef F2
996 #undef F3
997 #undef F4
998 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* "Original" field indices: one per distinct field name.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* "Compact" storage slots: fields that never co-exist share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;       /* the undecoded instruction bytes */
    unsigned op:8;           /* primary opcode */
    unsigned op2:8;          /* secondary opcode, where present */
    unsigned presentC:16;    /* bitmap of occupied compact slots */
    unsigned int presentO;   /* bitmap over enum DisasFieldIndexO */
    int c[NUM_C_FIELD];      /* extracted field values, compact-indexed */
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test whether original field O was decoded for this insn.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch the value of field O from its compact slot C; the field
   must be present.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;            /* bit offset of the field in the insn */
    unsigned int size:8;           /* width in bits */
    unsigned int type:2;           /* extraction type (see the R/I/BD macros) */
    unsigned int indexC:6;         /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1098 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1099 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1100 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1102 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1105 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1106 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1107 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1108 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1109 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1110 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1111 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1113 #define F0(N) { { } },
1114 #define F1(N, X1) { { X1 } },
1115 #define F2(N, X1, X2) { { X1, X2 } },
1116 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1117 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1118 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1120 static const DisasFormatInfo format_info[] = {
1121 #include "insn-format.def"
1124 #undef F0
1125 #undef F1
1126 #undef F2
1127 #undef F3
1128 #undef F4
1129 #undef F5
1130 #undef R
1131 #undef M
1132 #undef BD
1133 #undef BXD
1134 #undef BDL
1135 #undef BXDL
1136 #undef I
1137 #undef L
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    /* g_* flags: the corresponding TCGv is a global and must not be freed.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* Architectural facility an instruction belongs to (see DisasInsn.fac).  */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2 */
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
    FAC_LPP,                /* load-program-parameter */
} DisasFacility;

/* One decode-table entry: opcode, format, facility, operand-constraint
   bits (SPEC_*), and the helper callbacks that implement the insn.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    DisasFacility fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

/* Load a shift-count style second operand into o->in2: a plain
   immediate when b2 is 0, else the address b2+d2; either way masked
   to MASK bits.  */
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

/* Emit an unconditional direct branch to DEST, chaining with goto_tb
   when permitted; a branch to the next insn degenerates to a nop
   (apart from PER bookkeeping).  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
/* Emit a conditional branch for compare C, either to the immediate-
   relative target s->pc + 2*imm (is_imm) or to the address in CDEST.
   Degenerate conditions (never/always, branch-to-next, bcr %r0) are
   peeled off first; the remaining cases choose between dual goto_tb
   exits, a single goto_tb fallthrough, and a movcond-based PC update,
   depending on which edges are goto_tb-eligible. */
1260 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1261 bool is_imm, int imm, TCGv_i64 cdest)
1263 ExitStatus ret;
1264 uint64_t dest = s->pc + 2 * imm;
1265 TCGLabel *lab;
1267 /* Take care of the special cases first. */
1268 if (c->cond == TCG_COND_NEVER) {
1269 ret = NO_EXIT;
1270 goto egress;
1272 if (is_imm) {
1273 if (dest == s->next_pc) {
1274 /* Branch to next. */
1275 per_branch(s, true);
1276 ret = NO_EXIT;
1277 goto egress;
1279 if (c->cond == TCG_COND_ALWAYS) {
1280 ret = help_goto_direct(s, dest);
1281 goto egress;
1283 } else {
1284 if (TCGV_IS_UNUSED_I64(cdest)) {
1285 /* E.g. bcr %r0 -> no branch. */
1286 ret = NO_EXIT;
1287 goto egress;
1289 if (c->cond == TCG_COND_ALWAYS) {
1290 tcg_gen_mov_i64(psw_addr, cdest);
1291 per_branch(s, false);
1292 ret = EXIT_PC_UPDATED;
1293 goto egress;
1297 if (use_goto_tb(s, s->next_pc)) {
1298 if (is_imm && use_goto_tb(s, dest)) {
1299 /* Both exits can use goto_tb. */
1300 update_cc_op(s);
1302 lab = gen_new_label();
1303 if (c->is_64) {
1304 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1305 } else {
1306 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1309 /* Branch not taken. */
1310 tcg_gen_goto_tb(0);
1311 tcg_gen_movi_i64(psw_addr, s->next_pc);
1312 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1314 /* Branch taken. */
1315 gen_set_label(lab);
1316 per_breaking_event(s);
1317 tcg_gen_goto_tb(1);
1318 tcg_gen_movi_i64(psw_addr, dest);
1319 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1321 ret = EXIT_GOTO_TB;
1322 } else {
1323 /* Fallthru can use goto_tb, but taken branch cannot. */
1324 /* Store taken branch destination before the brcond. This
1325 avoids having to allocate a new local temp to hold it.
1326 We'll overwrite this in the not taken case anyway. */
1327 if (!is_imm) {
1328 tcg_gen_mov_i64(psw_addr, cdest);
1331 lab = gen_new_label();
1332 if (c->is_64) {
1333 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1334 } else {
1335 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1338 /* Branch not taken. */
1339 update_cc_op(s);
1340 tcg_gen_goto_tb(0);
1341 tcg_gen_movi_i64(psw_addr, s->next_pc);
1342 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1344 gen_set_label(lab);
1345 if (is_imm) {
1346 tcg_gen_movi_i64(psw_addr, dest);
1348 per_breaking_event(s);
1349 ret = EXIT_PC_UPDATED;
1351 } else {
1352 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1353 Most commonly we're single-stepping or some other condition that
1354 disables all use of goto_tb. Just update the PC and exit. */
1356 TCGv_i64 next = tcg_const_i64(s->next_pc);
1357 if (is_imm) {
1358 cdest = tcg_const_i64(dest);
1361 if (c->is_64) {
1362 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1363 cdest, next);
1364 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1365 } else {
/* 32-bit compare: widen the setcond result so a single 64-bit
   movcond can select between taken and fallthrough targets. */
1366 TCGv_i32 t0 = tcg_temp_new_i32();
1367 TCGv_i64 t1 = tcg_temp_new_i64();
1368 TCGv_i64 z = tcg_const_i64(0);
1369 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1370 tcg_gen_extu_i32_i64(t1, t0);
1371 tcg_temp_free_i32(t0);
1372 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1373 per_branch_cond(s, TCG_COND_NE, t1, z);
1374 tcg_temp_free_i64(t1);
1375 tcg_temp_free_i64(z);
1378 if (is_imm) {
1379 tcg_temp_free_i64(cdest);
1381 tcg_temp_free_i64(next);
1383 ret = EXIT_PC_UPDATED;
1386 egress:
1387 free_compare(c);
1388 return ret;
1391 /* ====================================================================== */
1392 /* The operations. These perform the bulk of the work for any insn,
1393 usually after the operands have been loaded and output initialized. */
/* Load absolute: out = |in2|, via neg + movcond (no branches). */
1395 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1397 TCGv_i64 z, n;
1398 z = tcg_const_i64(0);
1399 n = tcg_temp_new_i64();
1400 tcg_gen_neg_i64(n, o->in2);
1401 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1402 tcg_temp_free_i64(n);
1403 tcg_temp_free_i64(z);
1404 return NO_EXIT;
/* Load positive, short BFP: clear the sign bit of the 32-bit value. */
1407 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1409 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1410 return NO_EXIT;
/* Load positive, long BFP: clear bit 63. */
1413 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1415 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1416 return NO_EXIT;
/* Load positive, extended BFP: clear the sign in the high doubleword,
   pass the low doubleword through unchanged. */
1419 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1421 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1422 tcg_gen_mov_i64(o->out2, o->in2);
1423 return NO_EXIT;
/* 64-bit integer add; CC is computed by the table's cout hook. */
1426 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1428 tcg_gen_add_i64(o->out, o->in1, o->in2);
1429 return NO_EXIT;
/* Add with carry: in1 + in2 + (carry extracted from the current CC). */
1432 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1434 DisasCompare cmp;
1435 TCGv_i64 carry;
1437 tcg_gen_add_i64(o->out, o->in1, o->in2);
1439 /* The carry flag is the msb of CC, therefore the branch mask that would
1440 create that comparison is 3. Feeding the generated comparison to
1441 setcond produces the carry flag that we desire. */
1442 disas_jcc(s, &cmp, 3);
1443 carry = tcg_temp_new_i64();
1444 if (cmp.is_64) {
1445 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1446 } else {
1447 TCGv_i32 t = tcg_temp_new_i32();
1448 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1449 tcg_gen_extu_i32_i64(carry, t);
1450 tcg_temp_free_i32(t);
1452 free_compare(&cmp);
1454 tcg_gen_add_i64(o->out, o->out, carry);
1455 tcg_temp_free_i64(carry);
1456 return NO_EXIT;
/* BFP add, short / long / extended — all done in helpers.  The
   extended form returns its low doubleword via return_low128. */
1459 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1461 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1462 return NO_EXIT;
1465 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1467 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1468 return NO_EXIT;
1471 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1473 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1474 return_low128(o->out2);
1475 return NO_EXIT;
/* 64-bit bitwise AND. */
1478 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1480 tcg_gen_and_i64(o->out, o->in1, o->in2);
1481 return NO_EXIT;
/* AND immediate against one byte/halfword of the register.
   s->insn->data packs (field size << 8) | shift position. */
1484 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1486 int shift = s->insn->data & 0xff;
1487 int size = s->insn->data >> 8;
1488 uint64_t mask = ((1ull << size) - 1) << shift;
1490 assert(!o->g_in2);
1491 tcg_gen_shli_i64(o->in2, o->in2, shift);
1492 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1493 tcg_gen_and_i64(o->out, o->in1, o->in2);
1495 /* Produce the CC from only the bits manipulated. */
1496 tcg_gen_andi_i64(cc_dst, o->out, mask);
1497 set_cc_nz_u64(s, cc_dst);
1498 return NO_EXIT;
/* Branch and save: store the link information in R1 and branch to
   the address in in2; with r0 as the target register there is no
   branch (in2 is unused). */
1501 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1503 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1504 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1505 tcg_gen_mov_i64(psw_addr, o->in2);
1506 per_branch(s, false);
1507 return EXIT_PC_UPDATED;
1508 } else {
1509 return NO_EXIT;
/* Branch relative and save: link in R1, immediate-relative target. */
1513 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1515 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1516 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* Branch on condition (BC/BCR/BRC): mask m1 selects the CC values
   that take the branch; BCR with R2 == 0 only serializes. */
1519 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1521 int m1 = get_field(s->fields, m1);
1522 bool is_imm = have_field(s->fields, i2);
1523 int imm = is_imm ? get_field(s->fields, i2) : 0;
1524 DisasCompare c;
1526 /* BCR with R2 = 0 causes no branching */
1527 if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1528 if (m1 == 14) {
1529 /* Perform serialization */
1530 /* FIXME: check for fast-BCR-serialization facility */
1531 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1533 if (m1 == 15) {
1534 /* Perform serialization */
1535 /* FIXME: perform checkpoint-synchronisation */
1536 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1538 return NO_EXIT;
1541 disas_jcc(s, &c, m1);
1542 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on count, 32-bit: decrement the low half of R1 and branch
   while the result is nonzero. */
1545 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1547 int r1 = get_field(s->fields, r1);
1548 bool is_imm = have_field(s->fields, i2);
1549 int imm = is_imm ? get_field(s->fields, i2) : 0;
1550 DisasCompare c;
1551 TCGv_i64 t;
1553 c.cond = TCG_COND_NE;
1554 c.is_64 = false;
1555 c.g1 = false;
1556 c.g2 = false;
1558 t = tcg_temp_new_i64();
1559 tcg_gen_subi_i64(t, regs[r1], 1);
1560 store_reg32_i64(r1, t);
1561 c.u.s32.a = tcg_temp_new_i32();
1562 c.u.s32.b = tcg_const_i32(0);
1563 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1564 tcg_temp_free_i64(t);
1566 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on count, high half of R1 (BRCTH). */
1569 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1571 int r1 = get_field(s->fields, r1);
1572 int imm = get_field(s->fields, i2);
1573 DisasCompare c;
1574 TCGv_i64 t;
1576 c.cond = TCG_COND_NE;
1577 c.is_64 = false;
1578 c.g1 = false;
1579 c.g2 = false;
1581 t = tcg_temp_new_i64();
1582 tcg_gen_shri_i64(t, regs[r1], 32);
1583 tcg_gen_subi_i64(t, t, 1);
1584 store_reg32h_i64(r1, t);
1585 c.u.s32.a = tcg_temp_new_i32();
1586 c.u.s32.b = tcg_const_i32(0);
1587 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1588 tcg_temp_free_i64(t);
1590 return help_branch(s, &c, 1, imm, o->in2);
/* Branch on count, 64-bit: the compare reads regs[r1] in place (g1). */
1593 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1595 int r1 = get_field(s->fields, r1);
1596 bool is_imm = have_field(s->fields, i2);
1597 int imm = is_imm ? get_field(s->fields, i2) : 0;
1598 DisasCompare c;
1600 c.cond = TCG_COND_NE;
1601 c.is_64 = true;
1602 c.g1 = true;
1603 c.g2 = false;
1605 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1606 c.u.s64.a = regs[r1];
1607 c.u.s64.b = tcg_const_i64(0);
1609 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on index, 32-bit: R1 += R3, compare the sum against the odd
   register of the R3 pair.  s->insn->data selects BXLE (LE) vs BXH (GT).
   The comparand is read before the sum is stored, so r1 aliasing the
   odd register is safe here. */
1612 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1614 int r1 = get_field(s->fields, r1);
1615 int r3 = get_field(s->fields, r3);
1616 bool is_imm = have_field(s->fields, i2);
1617 int imm = is_imm ? get_field(s->fields, i2) : 0;
1618 DisasCompare c;
1619 TCGv_i64 t;
1621 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1622 c.is_64 = false;
1623 c.g1 = false;
1624 c.g2 = false;
1626 t = tcg_temp_new_i64();
1627 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1628 c.u.s32.a = tcg_temp_new_i32();
1629 c.u.s32.b = tcg_temp_new_i32();
1630 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1631 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1632 store_reg32_i64(r1, t);
1633 tcg_temp_free_i64(t);
1635 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on index, 64-bit.  When r1 aliases the odd register of the
   R3 pair, copy the comparand first so the in-place add to regs[r1]
   does not clobber it. */
1638 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1640 int r1 = get_field(s->fields, r1);
1641 int r3 = get_field(s->fields, r3);
1642 bool is_imm = have_field(s->fields, i2);
1643 int imm = is_imm ? get_field(s->fields, i2) : 0;
1644 DisasCompare c;
1646 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1647 c.is_64 = true;
1649 if (r1 == (r3 | 1)) {
1650 c.u.s64.b = load_reg(r3 | 1);
1651 c.g2 = false;
1652 } else {
1653 c.u.s64.b = regs[r3 | 1];
1654 c.g2 = true;
1657 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1658 c.u.s64.a = regs[r1];
1659 c.g1 = true;
1661 return help_branch(s, &c, is_imm, imm, o->in2);
/* Compare and branch (CRJ and friends): condition from ltgt_cond[m3];
   s->insn->data selects the logical (unsigned) comparison form.  The
   branch target is either relative (i4) or the b4/d4 address. */
1664 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1666 int imm, m3 = get_field(s->fields, m3);
1667 bool is_imm;
1668 DisasCompare c;
1670 c.cond = ltgt_cond[m3];
1671 if (s->insn->data) {
1672 c.cond = tcg_unsigned_cond(c.cond);
1674 c.is_64 = c.g1 = c.g2 = true;
1675 c.u.s64.a = o->in1;
1676 c.u.s64.b = o->in2;
1678 is_imm = have_field(s->fields, i4);
1679 if (is_imm) {
1680 imm = get_field(s->fields, i4);
1681 } else {
1682 imm = 0;
1683 o->out = get_address(s, 0, get_field(s->fields, b4),
1684 get_field(s->fields, d4));
1687 return help_branch(s, &c, is_imm, imm, o->out);
/* BFP compares (short/long/extended): the helper returns the CC
   directly into cc_op. */
1690 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1692 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1693 set_cc_static(s);
1694 return NO_EXIT;
1697 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1699 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1700 set_cc_static(s);
1701 return NO_EXIT;
1704 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1706 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1707 set_cc_static(s);
1708 return NO_EXIT;
/* Convert BFP to signed 32-bit fixed (CFEB/CFDB/CFXB).  The m3 field
   carries the rounding mode; CC reflects the source value. */
1711 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1713 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1714 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1715 tcg_temp_free_i32(m3);
1716 gen_set_cc_nz_f32(s, o->in2);
1717 return NO_EXIT;
1720 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1722 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1723 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1724 tcg_temp_free_i32(m3);
1725 gen_set_cc_nz_f64(s, o->in2);
1726 return NO_EXIT;
1729 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1731 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1732 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1733 tcg_temp_free_i32(m3);
1734 gen_set_cc_nz_f128(s, o->in1, o->in2);
1735 return NO_EXIT;
/* Convert BFP to signed 64-bit fixed (CGEB/CGDB/CGXB). */
1738 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1740 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1741 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1742 tcg_temp_free_i32(m3);
1743 gen_set_cc_nz_f32(s, o->in2);
1744 return NO_EXIT;
1747 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1749 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1750 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1751 tcg_temp_free_i32(m3);
1752 gen_set_cc_nz_f64(s, o->in2);
1753 return NO_EXIT;
1756 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1758 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1759 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1760 tcg_temp_free_i32(m3);
1761 gen_set_cc_nz_f128(s, o->in1, o->in2);
1762 return NO_EXIT;
/* Convert BFP to unsigned (logical) 32-bit fixed (CLFEB/CLFDB/CLFXB). */
1765 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1767 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1768 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1769 tcg_temp_free_i32(m3);
1770 gen_set_cc_nz_f32(s, o->in2);
1771 return NO_EXIT;
1774 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1776 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1777 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1778 tcg_temp_free_i32(m3);
1779 gen_set_cc_nz_f64(s, o->in2);
1780 return NO_EXIT;
1783 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1785 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1786 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1787 tcg_temp_free_i32(m3);
1788 gen_set_cc_nz_f128(s, o->in1, o->in2);
1789 return NO_EXIT;
/* Convert BFP to unsigned (logical) 64-bit fixed (CLGEB/CLGDB/CLGXB). */
1792 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1794 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1795 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1796 tcg_temp_free_i32(m3);
1797 gen_set_cc_nz_f32(s, o->in2);
1798 return NO_EXIT;
1801 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1803 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1804 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1805 tcg_temp_free_i32(m3);
1806 gen_set_cc_nz_f64(s, o->in2);
1807 return NO_EXIT;
1810 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1812 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1813 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1814 tcg_temp_free_i32(m3);
1815 gen_set_cc_nz_f128(s, o->in1, o->in2);
1816 return NO_EXIT;
/* Convert signed fixed to BFP (CEGB/CDGB/CXGB); no CC change. */
1819 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1821 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1822 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1823 tcg_temp_free_i32(m3);
1824 return NO_EXIT;
1827 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1829 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1830 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1831 tcg_temp_free_i32(m3);
1832 return NO_EXIT;
1835 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1837 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1838 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1839 tcg_temp_free_i32(m3);
1840 return_low128(o->out2);
1841 return NO_EXIT;
/* Convert unsigned (logical) fixed to BFP (CELGB/CDLGB/CXLGB). */
1844 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1846 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1847 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1848 tcg_temp_free_i32(m3);
1849 return NO_EXIT;
1852 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1854 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1855 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1856 tcg_temp_free_i32(m3);
1857 return NO_EXIT;
1860 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1862 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1863 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1864 tcg_temp_free_i32(m3);
1865 return_low128(o->out2);
1866 return NO_EXIT;
/* CKSM: the helper's direct return value (len) is the number of bytes
   consumed; the checksum comes back through return_low128 into o->out.
   The R2/R2+1 address/length pair is advanced by len afterwards. */
1869 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1871 int r2 = get_field(s->fields, r2);
1872 TCGv_i64 len = tcg_temp_new_i64();
1874 potential_page_fault(s);
1875 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1876 set_cc_static(s);
1877 return_low128(o->out);
1879 tcg_gen_add_i64(regs[r2], regs[r2], len);
1880 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1881 tcg_temp_free_i64(len);
1883 return NO_EXIT;
/* Compare logical (CLC): operand lengths of 1/2/4/8 bytes are inlined
   as two loads plus an unsigned-compare CC; other lengths go through
   the helper.  Note l1 holds length - 1. */
1886 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1888 int l = get_field(s->fields, l1);
1889 TCGv_i32 vl;
1891 switch (l + 1) {
1892 case 1:
1893 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1894 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1895 break;
1896 case 2:
1897 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1898 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1899 break;
1900 case 4:
1901 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1902 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1903 break;
1904 case 8:
1905 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1906 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1907 break;
1908 default:
1909 vl = tcg_const_i32(l);
1910 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1911 tcg_temp_free_i32(vl);
1912 set_cc_static(s);
1913 return NO_EXIT;
1915 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1916 return NO_EXIT;
/* Compare logical long extended (CLCLE), entirely in the helper. */
1919 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1921 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1922 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1923 potential_page_fault(s);
1924 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1925 tcg_temp_free_i32(r1);
1926 tcg_temp_free_i32(r3);
1927 set_cc_static(s);
1928 return NO_EXIT;
/* Compare logical characters under mask (CLM). */
1931 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1933 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1934 TCGv_i32 t1 = tcg_temp_new_i32();
1935 tcg_gen_extrl_i64_i32(t1, o->in1);
1936 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1937 set_cc_static(s);
1938 tcg_temp_free_i32(t1);
1939 tcg_temp_free_i32(m3);
1940 return NO_EXIT;
/* Compare logical string (CLST); regs[0] supplies the ending
   character.  Updated addresses come back via in1 and return_low128. */
1943 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1945 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1946 set_cc_static(s);
1947 return_low128(o->in2);
1948 return NO_EXIT;
/* Copy sign: out = (sign of in1) | (magnitude of in2). */
1951 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1953 TCGv_i64 t = tcg_temp_new_i64();
1954 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1955 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1956 tcg_gen_or_i64(o->out, o->out, t);
1957 tcg_temp_free_i64(t);
1958 return NO_EXIT;
/* Compare and swap (CS/CSG): atomic cmpxchg on the b2/d2 address;
   CC is 0 on match, 1 on mismatch (hence the NE setcond). */
1961 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1963 int d2 = get_field(s->fields, d2);
1964 int b2 = get_field(s->fields, b2);
1965 TCGv_i64 addr, cc;
1967 /* Note that in1 = R3 (new value) and
1968 in2 = (zero-extended) R1 (expected value). */
1970 addr = get_address(s, 0, b2, d2);
1971 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
1972 get_mem_index(s), s->insn->data | MO_ALIGN);
1973 tcg_temp_free_i64(addr);
1975 /* Are the memory and expected values (un)equal? Note that this setcond
1976 produces the output CC value, thus the NE sense of the test. */
1977 cc = tcg_temp_new_i64();
1978 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1979 tcg_gen_extrl_i64_i32(cc_op, cc);
1980 tcg_temp_free_i64(cc);
1981 set_cc_static(s);
1983 return NO_EXIT;
/* Compare double and swap (CDSG, 128-bit): done in the helper, which
   also sets the CC. */
1986 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1988 int r1 = get_field(s->fields, r1);
1989 int r3 = get_field(s->fields, r3);
1990 int d2 = get_field(s->fields, d2);
1991 int b2 = get_field(s->fields, b2);
1992 TCGv_i64 addr;
1993 TCGv_i32 t_r1, t_r3;
1995 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1996 addr = get_address(s, 0, b2, d2);
1997 t_r1 = tcg_const_i32(r1);
1998 t_r3 = tcg_const_i32(r3);
1999 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2000 tcg_temp_free_i64(addr);
2001 tcg_temp_free_i32(t_r1);
2002 tcg_temp_free_i32(t_r3);
2004 set_cc_static(s);
2005 return NO_EXIT;
2008 #ifndef CONFIG_USER_ONLY
/* Compare and swap and purge (CSP) — privileged. */
2009 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2011 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2012 check_privileged(s);
2013 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
2014 tcg_temp_free_i32(r1);
2015 set_cc_static(s);
2016 return NO_EXIT;
2018 #endif
/* Convert to decimal (CVD): helper converts the 32-bit value, the
   result is stored at the in2 address. */
2020 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2022 TCGv_i64 t1 = tcg_temp_new_i64();
2023 TCGv_i32 t2 = tcg_temp_new_i32();
2024 tcg_gen_extrl_i64_i32(t2, o->in1);
2025 gen_helper_cvd(t1, t2);
2026 tcg_temp_free_i32(t2);
2027 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2028 tcg_temp_free_i64(t1);
2029 return NO_EXIT;
/* Compare and trap: branch around the trap on the inverted condition;
   s->insn->data selects the logical (unsigned) comparison form. */
2034 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2036 int m3 = get_field(s->fields, m3);
2037 TCGLabel *lab = gen_new_label();
2038 TCGCond c;
2040 c = tcg_invert_cond(ltgt_cond[m3]);
2041 if (s->insn->data) {
2042 c = tcg_unsigned_cond(c);
2044 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2046 /* Trap. */
2047 gen_trap(s);
2049 gen_set_label(lab);
2050 return NO_EXIT;
2053 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE — privileged; PSW and CC must be up to date for the helper. */
2054 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2056 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2057 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2058 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2060 check_privileged(s);
2061 update_psw_addr(s);
2062 gen_op_calc_cc(s);
2064 gen_helper_diag(cpu_env, r1, r3, func_code);
2066 tcg_temp_free_i32(func_code);
2067 tcg_temp_free_i32(r3);
2068 tcg_temp_free_i32(r1);
2069 return NO_EXIT;
2071 #endif
/* Integer divides: the helper returns one half of the quotient/
   remainder pair directly and the other via return_low128. */
2073 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2075 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2076 return_low128(o->out);
2077 return NO_EXIT;
2080 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2082 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2083 return_low128(o->out);
2084 return NO_EXIT;
2087 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2089 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2090 return_low128(o->out);
2091 return NO_EXIT;
2094 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2096 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2097 return_low128(o->out);
2098 return NO_EXIT;
/* BFP divide, short / long / extended. */
2101 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2103 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2104 return NO_EXIT;
2107 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2109 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2110 return NO_EXIT;
2113 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2115 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2116 return_low128(o->out2);
2117 return NO_EXIT;
/* Extract access register: out = aregs[r2] (zero-extended). */
2120 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2122 int r2 = get_field(s->fields, r2);
2123 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2124 return NO_EXIT;
2127 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2129 /* No cache information provided. */
2130 tcg_gen_movi_i64(o->out, -1);
2131 return NO_EXIT;
/* Extract FPC register. */
2134 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2136 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2137 return NO_EXIT;
/* Extract PSW: high half of psw_mask to r1, low half to r2 (if set). */
2140 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2142 int r1 = get_field(s->fields, r1);
2143 int r2 = get_field(s->fields, r2);
2144 TCGv_i64 t = tcg_temp_new_i64();
2146 /* Note the "subsequently" in the PoO, which implies a defined result
2147 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2148 tcg_gen_shri_i64(t, psw_mask, 32);
2149 store_reg32_i64(r1, t);
2150 if (r2 != 0) {
2151 store_reg32_i64(r2, psw_mask);
2154 tcg_temp_free_i64(t);
2155 return NO_EXIT;
2158 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2160 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2161 tb->flags, (ab)use the tb->cs_base field as the address of
2162 the template in memory, and grab 8 bits of tb->flags/cflags for
2163 the contents of the register. We would then recognize all this
2164 in gen_intermediate_code_internal, generating code for exactly
2165 one instruction. This new TB then gets executed normally.
2167 On the other hand, this seems to be mostly used for modifying
2168 MVC inside of memcpy, which needs a helper call anyway. So
2169 perhaps this doesn't bear thinking about any further. */
2171 TCGv_i64 tmp;
2173 update_psw_addr(s);
2174 gen_op_calc_cc(s);
2176 tmp = tcg_const_i64(s->next_pc);
2177 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2178 tcg_temp_free_i64(tmp);
2180 return NO_EXIT;
/* Load FP integer (round to an integral value); m3 = rounding mode. */
2183 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2185 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2186 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2187 tcg_temp_free_i32(m3);
2188 return NO_EXIT;
2191 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2193 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2194 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2195 tcg_temp_free_i32(m3);
2196 return NO_EXIT;
2199 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2201 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2202 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2203 return_low128(o->out2);
2204 tcg_temp_free_i32(m3);
2205 return NO_EXIT;
/* Find leftmost one (FLOGR): R1 = leading-zero count (64 if zero),
   R1+1 = input with the found bit cleared. */
2208 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2210 /* We'll use the original input for cc computation, since we get to
2211 compare that against 0, which ought to be better than comparing
2212 the real output against 64. It also lets cc_dst be a convenient
2213 temporary during our computation. */
2214 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2216 /* R1 = IN ? CLZ(IN) : 64. */
2217 tcg_gen_clzi_i64(o->out, o->in2, 64);
2219 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2220 value by 64, which is undefined. But since the shift is 64 iff the
2221 input is zero, we still get the correct result after and'ing. */
2222 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2223 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2224 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2225 return NO_EXIT;
2226 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2228 int m3 = get_field(s->fields, m3);
2229 int pos, len, base = s->insn->data;
2230 TCGv_i64 tmp = tcg_temp_new_i64();
2231 uint64_t ccm;
2233 switch (m3) {
2234 case 0xf:
2235 /* Effectively a 32-bit load. */
2236 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2237 len = 32;
2238 goto one_insert;
2240 case 0xc:
2241 case 0x6:
2242 case 0x3:
2243 /* Effectively a 16-bit load. */
2244 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2245 len = 16;
2246 goto one_insert;
2248 case 0x8:
2249 case 0x4:
2250 case 0x2:
2251 case 0x1:
2252 /* Effectively an 8-bit load. */
2253 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2254 len = 8;
2255 goto one_insert;
2257 one_insert:
2258 pos = base + ctz32(m3) * 8;
2259 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2260 ccm = ((1ull << len) - 1) << pos;
2261 break;
2263 default:
2264 /* This is going to be a sequence of loads and inserts. */
2265 pos = base + 32 - 8;
2266 ccm = 0;
2267 while (m3) {
2268 if (m3 & 0x8) {
2269 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2270 tcg_gen_addi_i64(o->in2, o->in2, 1);
2271 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2272 ccm |= 0xff << pos;
2274 m3 = (m3 << 1) & 0xf;
2275 pos -= 8;
2277 break;
2280 tcg_gen_movi_i64(tmp, ccm);
2281 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2282 tcg_temp_free_i64(tmp);
2283 return NO_EXIT;
/* Insert immediate: deposit in2 into in1 at the (shift, size) position
   packed into s->insn->data as (size << 8) | shift. */
2288 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2290 int shift = s->insn->data & 0xff;
2291 int size = s->insn->data >> 8;
2292 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2293 return NO_EXIT;
/* Insert program mask (IPM): pack the program mask from the PSW and
   the condition code into bits 24-31 of r1, leaving the rest intact. */
2296 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2298 TCGv_i64 t1;
2300 gen_op_calc_cc(s);
2301 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2303 t1 = tcg_temp_new_i64();
2304 tcg_gen_shli_i64(t1, psw_mask, 20);
2305 tcg_gen_shri_i64(t1, t1, 36);
2306 tcg_gen_or_i64(o->out, o->out, t1);
2308 tcg_gen_extu_i32_i64(t1, cc_op);
2309 tcg_gen_shli_i64(t1, t1, 28);
2310 tcg_gen_or_i64(o->out, o->out, t1);
2311 tcg_temp_free_i64(t1);
2312 return NO_EXIT;
2315 #ifndef CONFIG_USER_ONLY
/* Invalidate page table entry (IPTE) — privileged. */
2316 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2318 check_privileged(s);
2319 gen_helper_ipte(cpu_env, o->in1, o->in2);
2320 return NO_EXIT;
/* Insert storage key extended (ISKE) — privileged. */
2323 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2325 check_privileged(s);
2326 gen_helper_iske(o->out, cpu_env, o->in2);
2327 return NO_EXIT;
2329 #endif
/* Load and add (LAA/LAAG): atomic fetch-add at the in2 address. */
2331 static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2333 /* The real output is indeed the original value in memory;
2334 the addition is redone below purely for the CC computation. */
2335 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2336 s->insn->data | MO_ALIGN);
2337 /* However, we need to recompute the addition for setting CC. */
2338 tcg_gen_add_i64(o->out, o->in1, o->in2);
2339 return NO_EXIT;
/* Load and and (LAN/LANG): atomic fetch-and at the in2 address. */
2342 static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2344 /* The real output is indeed the original value in memory;
2345 the operation is redone below purely for the CC computation. */
2346 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2347 s->insn->data | MO_ALIGN);
2348 /* However, we need to recompute the operation for setting CC. */
2349 tcg_gen_and_i64(o->out, o->in1, o->in2);
2350 return NO_EXIT;
/* Load and or (LAO/LAOG): atomic fetch-or at the in2 address. */
2353 static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2355 /* The real output is indeed the original value in memory;
2356 the operation is redone below purely for the CC computation. */
2357 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2358 s->insn->data | MO_ALIGN);
2359 /* However, we need to recompute the operation for setting CC. */
2360 tcg_gen_or_i64(o->out, o->in1, o->in2);
2361 return NO_EXIT;
/* Load and exclusive or (LAX/LAXG): atomic fetch-xor at in2. */
2364 static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2366 /* The real output is indeed the original value in memory;
2367 the operation is redone below purely for the CC computation. */
2368 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2369 s->insn->data | MO_ALIGN);
2370 /* However, we need to recompute the operation for setting CC. */
2371 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2372 return NO_EXIT;
/* Load lengthened / load rounded between BFP formats, via helpers;
   the extended-format results return their low half via return_low128. */
2375 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2377 gen_helper_ldeb(o->out, cpu_env, o->in2);
2378 return NO_EXIT;
2381 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2383 gen_helper_ledb(o->out, cpu_env, o->in2);
2384 return NO_EXIT;
2387 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2389 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2390 return NO_EXIT;
2393 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2395 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2396 return NO_EXIT;
2399 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2401 gen_helper_lxdb(o->out, cpu_env, o->in2);
2402 return_low128(o->out2);
2403 return NO_EXIT;
2406 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2408 gen_helper_lxeb(o->out, cpu_env, o->in2);
2409 return_low128(o->out2);
2410 return NO_EXIT;
/* Load logical 31-bit: keep only the low 31 bits of in2. */
2413 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2415 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2416 return NO_EXIT;
/* Memory loads of 1/2/4/8 bytes, sign- or zero-extended to 64 bits. */
2419 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2421 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2422 return NO_EXIT;
2425 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2427 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2428 return NO_EXIT;
2431 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2433 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2434 return NO_EXIT;
2437 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2439 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2440 return NO_EXIT;
2443 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2445 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2446 return NO_EXIT;
2449 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2451 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2452 return NO_EXIT;
2455 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2457 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2458 return NO_EXIT;
/* The "load and trap" family: load a value and raise a trap if the
   loaded value is zero.  In each case the register is written even
   when the trap is taken.  */

static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* As op_lat, but targeting the high half of the 64-bit register.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* 31-bit variant: only bits 33-63 of the source participate.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD ON CONDITION: select between in1 (old value) and in2 (new value)
   according to the condition encoded in the M3 mask.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        /* Widen the 32-bit comparison result to feed a 64-bit movcond.  */
        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LCTL: load control registers R1..R3 from 32-bit fields in memory.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LCTLG: as LCTL but with 64-bit fields in memory.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LRA: translate a virtual address; CC reflects translation success.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LPP: store the second operand as the program parameter.  */
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}

/* LPSW: load a short (ESA) PSW from memory and install it.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* Control flow is redirected by the new PSW; never fall through.  */
    return EXIT_NORETURN;
}

/* LPSWE: load a full 16-byte (z/Architecture) PSW and install it.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
/* LAM: load access registers R1..R3 from memory (helper does the loop).  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LM: load multiple 32-bit registers R1..R3 (wrapping modulo 16) from
   consecutive words at the address in in2.  The first and last words are
   loaded first so that any page fault is taken before registers change.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LMH: as op_lm32, but the loaded words go into the HIGH halves of the
   64-bit registers R1..R3.  Fault-first ordering is the same.  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LMG: load multiple 64-bit registers R1..R3 (wrapping modulo 16).
   As in op_lm32, touch the first and last doublewords first so a page
   fault is raised before any register is modified.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    /* r1 is committed only after both boundary loads have succeeded.  */
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LPD / LPDG (LOAD PAIR DISJOINT): load two operands from two addresses
   as if interlocked.  Serial contexts simply do both loads; parallel
   contexts fall back to the stop-the-world EXCP_ATOMIC path.  */
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (parallel_cpus) {
        potential_page_fault(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LURA: load 32 bits using the real (untranslated) address in in2.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LURAG: 64-bit variant of LURA.  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* Generic move: steal in2 as the output value, transferring ownership of
   the temporary (and its "global" flag) so it is not freed twice.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Move that additionally sets access register 1 according to the
   address-space control in the PSW (used by MVCOS-style insns).  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}

/* 128-bit move: steal the in1:in2 pair as the out:out2 pair.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MVC: move L1+1 bytes from the second-operand address to addr1.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MVCL: move long; operands are the register pairs R1 and R2.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MVCLE: move long extended; the helper relies on unwind data for any
   page fault, so no potential_page_fault() is emitted here.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* MVCP: move to primary space; length is taken from register L1.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MVCS: move to secondary space; length is taken from register L1.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif

/* MVPG: move page; R0 carries the access controls.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MVST: move string until the ending byte in R0 is copied.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* Integer multiply, low 64 bits only.  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* Unsigned 64x64 -> 128-bit multiply; high half in out, low in out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiplies: MEEB (short), MDEB (short->long), MDB (long),
   MXB (extended), MXDB (long->extended).  */

static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Fused multiply-add/subtract: MAEB/MADB and MSEB/MSDB, with the third
   operand taken from floating-point register R3.  */

static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* LOAD NEGATIVE (integer): out = -|in2|, via movcond on the sign.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

/* LOAD NEGATIVE (BFP): force the sign bit on -- 32-, 64- and 128-bit.  */

static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    /* The sign lives in the high doubleword; the low half is copied.  */
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* NC: storage-to-storage AND over L1+1 bytes; CC set from the result.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (integer): two's complement negate.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (BFP): flip the sign bit -- 32-, 64- and 128-bit.  */

static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* OC: storage-to-storage OR over L1+1 bytes; CC set from the result.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* OR of the two register/memory operands.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR immediate into one field of the register; insn->data packs the
   field's bit position (low byte) and width (high byte).  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* POPCNT: per-byte population count via helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}

#ifndef CONFIG_USER_ONLY
/* PTLB: purge the (emulated) TLB.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
#endif
/* RISBG / RISBHG / RISBLG (ROTATE THEN INSERT SELECTED BITS): rotate R2
   left by I5 and insert bits I3..I4 into R1, optionally zeroing the rest
   (bit 0x80 of I4).  Where the mask shapes allow, the operation is lowered
   to a single extract or deposit.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
        tcg_gen_extract_i64(o->out, o->in2, rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both halves and merge.  */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
/* RNSBG / ROSBG / RXSBG (ROTATE THEN {AND,OR,XOR} SELECTED BITS): rotate
   R2 left by I5, combine bits I3..I4 into R1 with the logical op selected
   by the opcode, and set CC from the selected bits of the result.  The
   test-only form (bit 0x80 of I3) discards the result.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside the mask are made neutral for each op
       (1 for AND, 0 for OR/XOR) so only the selected bits change.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Byte-reverse the low 16, 32 or full 64 bits of the operand.  */

static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}

/* RLL: 32-bit rotate left, done in 32-bit temporaries and then
   zero-extended into the 64-bit output.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* RLLG: 64-bit rotate left.  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* RRBE: reset reference bit extended; CC from the old key bits.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SACF: set address space control fast.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
#endif
/* SAM24 / SAM31 / SAM64: set addressing mode.  insn->data selects the
   mode (0 = 24-bit, 1 = 31-bit, else 64-bit) and is deposited into the
   PSW mask's AM bits.  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
/* SAR: set access register R1 from the second operand.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* BFP subtract: SEB (short), SDB (long), SXB (extended).  */

static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP square root: SQEB (short), SQDB (long), SQXB (extended).  */

static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SERVC: service call (SCLP); CC from the helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGP: signal processor; CC from the helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* STOC / STOCG (STORE ON CONDITION): store R1 (32 or 64 bits, per
   insn->data) only when the M3 condition holds.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SLA / SLAG (SHIFT LEFT SINGLE): arithmetic left shift.  insn->data
   holds the sign-bit position (31 or 63), which also selects the CC op.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* Logical left, arithmetic right, logical right shifts.  */

static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SFPC: set the floating-point control register.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SFASR: set FPC and signal (may raise simulated IEEE exceptions).  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}

/* SRNM / SRNMB / SRNMT: set the rounding-mode field of the FPC.  The
   opcode selects which FPC bit-field (position/length) is written.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SPKA: set the PSW key from bits of the second-operand address.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SSKE: set storage key extended.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SSM: set the system-mask byte (PSW bits 0-7).  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STAP: store CPU address.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* STCK: store clock.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STCKE: store clock extended -- the clock value widened to the
   architected 16-byte format.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* SCKC: set clock comparator.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STCKC: store clock comparator.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STCTG: store control registers R1..R3 as 64-bit fields.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STCTL: store control registers R1..R3 as 32-bit fields.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STIDP: store CPU id -- cpu_num in the low word, machine type above.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}

/* SPT: set the CPU timer.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STFL: store facility list at the fixed low-core location.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STPT: store the CPU timer.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STSI: store system information; function code in R0, selectors in R1.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SPX: set prefix register.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* Channel-subsystem instructions; the subchannel id is in R1 by
   convention, and each helper sets the CC.  */

static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
3747 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3749 check_privileged(s);
3750 potential_page_fault(s);
3751 gen_helper_ssch(cpu_env, regs[1], o->in2);
3752 set_cc_static(s);
3753 return NO_EXIT;
3756 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3758 check_privileged(s);
3759 potential_page_fault(s);
3760 gen_helper_stsch(cpu_env, regs[1], o->in2);
3761 set_cc_static(s);
3762 return NO_EXIT;
3765 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3767 check_privileged(s);
3768 potential_page_fault(s);
3769 gen_helper_tsch(cpu_env, regs[1], o->in2);
3770 set_cc_static(s);
3771 return NO_EXIT;
3774 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3776 check_privileged(s);
3777 potential_page_fault(s);
3778 gen_helper_chsc(cpu_env, o->in2);
3779 set_cc_static(s);
3780 return NO_EXIT;
3783 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3785 check_privileged(s);
3786 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3787 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3788 return NO_EXIT;
3791 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3793 uint64_t i2 = get_field(s->fields, i2);
3794 TCGv_i64 t;
3796 check_privileged(s);
3798 /* It is important to do what the instruction name says: STORE THEN.
3799 If we let the output hook perform the store then if we fault and
3800 restart, we'll have the wrong SYSTEM MASK in place. */
3801 t = tcg_temp_new_i64();
3802 tcg_gen_shri_i64(t, psw_mask, 56);
3803 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3804 tcg_temp_free_i64(t);
3806 if (s->fields->op == 0xac) {
3807 tcg_gen_andi_i64(psw_mask, psw_mask,
3808 (i2 << 56) | 0x00ffffffffffffffull);
3809 } else {
3810 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3812 return NO_EXIT;
3815 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3817 check_privileged(s);
3818 potential_page_fault(s);
3819 gen_helper_stura(cpu_env, o->in2, o->in1);
3820 return NO_EXIT;
3823 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3825 check_privileged(s);
3826 potential_page_fault(s);
3827 gen_helper_sturg(cpu_env, o->in2, o->in1);
3828 return NO_EXIT;
3830 #endif
3832 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
3834 potential_page_fault(s);
3835 gen_helper_stfle(cc_op, cpu_env, o->in2);
3836 set_cc_static(s);
3837 return NO_EXIT;
3840 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3842 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3843 return NO_EXIT;
3846 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3848 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3849 return NO_EXIT;
3852 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3854 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3855 return NO_EXIT;
3858 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3860 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3861 return NO_EXIT;
3864 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3866 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3867 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3868 gen_helper_stam(cpu_env, r1, o->in2, r3);
3869 tcg_temp_free_i32(r1);
3870 tcg_temp_free_i32(r3);
3871 return NO_EXIT;
3874 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3876 int m3 = get_field(s->fields, m3);
3877 int pos, base = s->insn->data;
3878 TCGv_i64 tmp = tcg_temp_new_i64();
3880 pos = base + ctz32(m3) * 8;
3881 switch (m3) {
3882 case 0xf:
3883 /* Effectively a 32-bit store. */
3884 tcg_gen_shri_i64(tmp, o->in1, pos);
3885 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3886 break;
3888 case 0xc:
3889 case 0x6:
3890 case 0x3:
3891 /* Effectively a 16-bit store. */
3892 tcg_gen_shri_i64(tmp, o->in1, pos);
3893 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3894 break;
3896 case 0x8:
3897 case 0x4:
3898 case 0x2:
3899 case 0x1:
3900 /* Effectively an 8-bit store. */
3901 tcg_gen_shri_i64(tmp, o->in1, pos);
3902 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3903 break;
3905 default:
3906 /* This is going to be a sequence of shifts and stores. */
3907 pos = base + 32 - 8;
3908 while (m3) {
3909 if (m3 & 0x8) {
3910 tcg_gen_shri_i64(tmp, o->in1, pos);
3911 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3912 tcg_gen_addi_i64(o->in2, o->in2, 1);
3914 m3 = (m3 << 1) & 0xf;
3915 pos -= 8;
3917 break;
3919 tcg_temp_free_i64(tmp);
3920 return NO_EXIT;
3923 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3925 int r1 = get_field(s->fields, r1);
3926 int r3 = get_field(s->fields, r3);
3927 int size = s->insn->data;
3928 TCGv_i64 tsize = tcg_const_i64(size);
3930 while (1) {
3931 if (size == 8) {
3932 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3933 } else {
3934 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3936 if (r1 == r3) {
3937 break;
3939 tcg_gen_add_i64(o->in2, o->in2, tsize);
3940 r1 = (r1 + 1) & 15;
3943 tcg_temp_free_i64(tsize);
3944 return NO_EXIT;
3947 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3949 int r1 = get_field(s->fields, r1);
3950 int r3 = get_field(s->fields, r3);
3951 TCGv_i64 t = tcg_temp_new_i64();
3952 TCGv_i64 t4 = tcg_const_i64(4);
3953 TCGv_i64 t32 = tcg_const_i64(32);
3955 while (1) {
3956 tcg_gen_shl_i64(t, regs[r1], t32);
3957 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3958 if (r1 == r3) {
3959 break;
3961 tcg_gen_add_i64(o->in2, o->in2, t4);
3962 r1 = (r1 + 1) & 15;
3965 tcg_temp_free_i64(t);
3966 tcg_temp_free_i64(t4);
3967 tcg_temp_free_i64(t32);
3968 return NO_EXIT;
3971 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3973 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3974 set_cc_static(s);
3975 return_low128(o->in2);
3976 return NO_EXIT;
3979 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3981 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3982 return NO_EXIT;
3985 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3987 DisasCompare cmp;
3988 TCGv_i64 borrow;
3990 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3992 /* The !borrow flag is the msb of CC. Since we want the inverse of
3993 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3994 disas_jcc(s, &cmp, 8 | 4);
3995 borrow = tcg_temp_new_i64();
3996 if (cmp.is_64) {
3997 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3998 } else {
3999 TCGv_i32 t = tcg_temp_new_i32();
4000 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4001 tcg_gen_extu_i32_i64(borrow, t);
4002 tcg_temp_free_i32(t);
4004 free_compare(&cmp);
4006 tcg_gen_sub_i64(o->out, o->out, borrow);
4007 tcg_temp_free_i64(borrow);
4008 return NO_EXIT;
4011 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4013 TCGv_i32 t;
4015 update_psw_addr(s);
4016 update_cc_op(s);
4018 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4019 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4020 tcg_temp_free_i32(t);
4022 t = tcg_const_i32(s->next_pc - s->pc);
4023 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4024 tcg_temp_free_i32(t);
4026 gen_exception(EXCP_SVC);
4027 return EXIT_NORETURN;
4030 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4032 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4033 set_cc_static(s);
4034 return NO_EXIT;
4037 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4039 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4040 set_cc_static(s);
4041 return NO_EXIT;
4044 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4046 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4047 set_cc_static(s);
4048 return NO_EXIT;
4051 #ifndef CONFIG_USER_ONLY
4053 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4055 check_privileged(s);
4056 potential_page_fault(s);
4057 gen_helper_testblock(cc_op, cpu_env, o->in2);
4058 set_cc_static(s);
4059 return NO_EXIT;
4062 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4064 potential_page_fault(s);
4065 gen_helper_tprot(cc_op, o->addr1, o->in2);
4066 set_cc_static(s);
4067 return NO_EXIT;
4070 #endif
4072 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4074 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4075 potential_page_fault(s);
4076 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4077 tcg_temp_free_i32(l);
4078 set_cc_static(s);
4079 return NO_EXIT;
4082 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4084 potential_page_fault(s);
4085 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4086 return_low128(o->out2);
4087 set_cc_static(s);
4088 return NO_EXIT;
4091 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4093 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4094 potential_page_fault(s);
4095 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4096 tcg_temp_free_i32(l);
4097 set_cc_static(s);
4098 return NO_EXIT;
4101 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4103 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4104 potential_page_fault(s);
4105 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4106 tcg_temp_free_i32(l);
4107 return NO_EXIT;
4110 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4112 int d1 = get_field(s->fields, d1);
4113 int d2 = get_field(s->fields, d2);
4114 int b1 = get_field(s->fields, b1);
4115 int b2 = get_field(s->fields, b2);
4116 int l = get_field(s->fields, l1);
4117 TCGv_i32 t32;
4119 o->addr1 = get_address(s, 0, b1, d1);
4121 /* If the addresses are identical, this is a store/memset of zero. */
4122 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4123 o->in2 = tcg_const_i64(0);
4125 l++;
4126 while (l >= 8) {
4127 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4128 l -= 8;
4129 if (l > 0) {
4130 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4133 if (l >= 4) {
4134 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4135 l -= 4;
4136 if (l > 0) {
4137 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4140 if (l >= 2) {
4141 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4142 l -= 2;
4143 if (l > 0) {
4144 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4147 if (l) {
4148 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4150 gen_op_movi_cc(s, 0);
4151 return NO_EXIT;
4154 /* But in general we'll defer to a helper. */
4155 o->in2 = get_address(s, 0, b2, d2);
4156 t32 = tcg_const_i32(l);
4157 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4158 tcg_temp_free_i32(t32);
4159 set_cc_static(s);
4160 return NO_EXIT;
4163 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4165 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4166 return NO_EXIT;
4169 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4171 int shift = s->insn->data & 0xff;
4172 int size = s->insn->data >> 8;
4173 uint64_t mask = ((1ull << size) - 1) << shift;
4175 assert(!o->g_in2);
4176 tcg_gen_shli_i64(o->in2, o->in2, shift);
4177 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4179 /* Produce the CC from only the bits manipulated. */
4180 tcg_gen_andi_i64(cc_dst, o->out, mask);
4181 set_cc_nz_u64(s, cc_dst);
4182 return NO_EXIT;
4185 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4187 o->out = tcg_const_i64(0);
4188 return NO_EXIT;
4191 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4193 o->out = tcg_const_i64(0);
4194 o->out2 = o->out;
4195 o->g_out2 = true;
4196 return NO_EXIT;
4199 /* ====================================================================== */
4200 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4201 the original inputs), update the various cc data structures in order to
4202 be able to compute the new condition code. */
4204 static void cout_abs32(DisasContext *s, DisasOps *o)
4206 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4209 static void cout_abs64(DisasContext *s, DisasOps *o)
4211 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4214 static void cout_adds32(DisasContext *s, DisasOps *o)
4216 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4219 static void cout_adds64(DisasContext *s, DisasOps *o)
4221 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4224 static void cout_addu32(DisasContext *s, DisasOps *o)
4226 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4229 static void cout_addu64(DisasContext *s, DisasOps *o)
4231 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4234 static void cout_addc32(DisasContext *s, DisasOps *o)
4236 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4239 static void cout_addc64(DisasContext *s, DisasOps *o)
4241 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4244 static void cout_cmps32(DisasContext *s, DisasOps *o)
4246 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4249 static void cout_cmps64(DisasContext *s, DisasOps *o)
4251 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4254 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4256 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4259 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4261 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4264 static void cout_f32(DisasContext *s, DisasOps *o)
4266 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4269 static void cout_f64(DisasContext *s, DisasOps *o)
4271 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4274 static void cout_f128(DisasContext *s, DisasOps *o)
4276 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4279 static void cout_nabs32(DisasContext *s, DisasOps *o)
4281 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4284 static void cout_nabs64(DisasContext *s, DisasOps *o)
4286 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4289 static void cout_neg32(DisasContext *s, DisasOps *o)
4291 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4294 static void cout_neg64(DisasContext *s, DisasOps *o)
4296 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4299 static void cout_nz32(DisasContext *s, DisasOps *o)
4301 tcg_gen_ext32u_i64(cc_dst, o->out);
4302 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4305 static void cout_nz64(DisasContext *s, DisasOps *o)
4307 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4310 static void cout_s32(DisasContext *s, DisasOps *o)
4312 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4315 static void cout_s64(DisasContext *s, DisasOps *o)
4317 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4320 static void cout_subs32(DisasContext *s, DisasOps *o)
4322 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4325 static void cout_subs64(DisasContext *s, DisasOps *o)
4327 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4330 static void cout_subu32(DisasContext *s, DisasOps *o)
4332 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4335 static void cout_subu64(DisasContext *s, DisasOps *o)
4337 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4340 static void cout_subb32(DisasContext *s, DisasOps *o)
4342 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4345 static void cout_subb64(DisasContext *s, DisasOps *o)
4347 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4350 static void cout_tm32(DisasContext *s, DisasOps *o)
4352 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4355 static void cout_tm64(DisasContext *s, DisasOps *o)
4357 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4360 /* ====================================================================== */
4361 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4362 with the TCG register to which we will write. Used in combination with
4363 the "wout" generators, in some cases we need a new temporary, and in
4364 some cases we can write to a TCG global. */
4366 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4368 o->out = tcg_temp_new_i64();
4370 #define SPEC_prep_new 0
4372 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4374 o->out = tcg_temp_new_i64();
4375 o->out2 = tcg_temp_new_i64();
4377 #define SPEC_prep_new_P 0
4379 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4381 o->out = regs[get_field(f, r1)];
4382 o->g_out = true;
4384 #define SPEC_prep_r1 0
4386 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4388 int r1 = get_field(f, r1);
4389 o->out = regs[r1];
4390 o->out2 = regs[r1 + 1];
4391 o->g_out = o->g_out2 = true;
4393 #define SPEC_prep_r1_P SPEC_r1_even
4395 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4397 o->out = fregs[get_field(f, r1)];
4398 o->g_out = true;
4400 #define SPEC_prep_f1 0
4402 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4404 int r1 = get_field(f, r1);
4405 o->out = fregs[r1];
4406 o->out2 = fregs[r1 + 2];
4407 o->g_out = o->g_out2 = true;
4409 #define SPEC_prep_x1 SPEC_r1_f128
4411 /* ====================================================================== */
4412 /* The "Write OUTput" generators. These generally perform some non-trivial
4413 copy of data to TCG globals, or to main memory. The trivial cases are
4414 generally handled by having a "prep" generator install the TCG global
4415 as the destination of the operation. */
4417 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4419 store_reg(get_field(f, r1), o->out);
4421 #define SPEC_wout_r1 0
4423 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4425 int r1 = get_field(f, r1);
4426 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4428 #define SPEC_wout_r1_8 0
4430 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4432 int r1 = get_field(f, r1);
4433 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4435 #define SPEC_wout_r1_16 0
4437 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4439 store_reg32_i64(get_field(f, r1), o->out);
4441 #define SPEC_wout_r1_32 0
4443 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4445 store_reg32h_i64(get_field(f, r1), o->out);
4447 #define SPEC_wout_r1_32h 0
4449 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4451 int r1 = get_field(f, r1);
4452 store_reg32_i64(r1, o->out);
4453 store_reg32_i64(r1 + 1, o->out2);
4455 #define SPEC_wout_r1_P32 SPEC_r1_even
4457 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4459 int r1 = get_field(f, r1);
4460 store_reg32_i64(r1 + 1, o->out);
4461 tcg_gen_shri_i64(o->out, o->out, 32);
4462 store_reg32_i64(r1, o->out);
4464 #define SPEC_wout_r1_D32 SPEC_r1_even
4466 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4468 int r3 = get_field(f, r3);
4469 store_reg32_i64(r3, o->out);
4470 store_reg32_i64(r3 + 1, o->out2);
4472 #define SPEC_wout_r3_P32 SPEC_r3_even
4474 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4476 int r3 = get_field(f, r3);
4477 store_reg(r3, o->out);
4478 store_reg(r3 + 1, o->out2);
4480 #define SPEC_wout_r3_P64 SPEC_r3_even
4482 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4484 store_freg32_i64(get_field(f, r1), o->out);
4486 #define SPEC_wout_e1 0
4488 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4490 store_freg(get_field(f, r1), o->out);
4492 #define SPEC_wout_f1 0
4494 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4496 int f1 = get_field(s->fields, r1);
4497 store_freg(f1, o->out);
4498 store_freg(f1 + 2, o->out2);
4500 #define SPEC_wout_x1 SPEC_r1_f128
4502 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4504 if (get_field(f, r1) != get_field(f, r2)) {
4505 store_reg32_i64(get_field(f, r1), o->out);
4508 #define SPEC_wout_cond_r1r2_32 0
4510 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4512 if (get_field(f, r1) != get_field(f, r2)) {
4513 store_freg32_i64(get_field(f, r1), o->out);
4516 #define SPEC_wout_cond_e1e2 0
4518 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4520 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4522 #define SPEC_wout_m1_8 0
4524 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4526 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4528 #define SPEC_wout_m1_16 0
4530 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4532 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4534 #define SPEC_wout_m1_32 0
4536 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4538 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4540 #define SPEC_wout_m1_64 0
4542 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4544 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4546 #define SPEC_wout_m2_32 0
4548 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4550 store_reg(get_field(f, r1), o->in2);
4552 #define SPEC_wout_in2_r1 0
4554 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4556 store_reg32_i64(get_field(f, r1), o->in2);
4558 #define SPEC_wout_in2_r1_32 0
4560 /* ====================================================================== */
4561 /* The "INput 1" generators. These load the first operand to an insn. */
4563 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4565 o->in1 = load_reg(get_field(f, r1));
4567 #define SPEC_in1_r1 0
4569 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4571 o->in1 = regs[get_field(f, r1)];
4572 o->g_in1 = true;
4574 #define SPEC_in1_r1_o 0
4576 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4578 o->in1 = tcg_temp_new_i64();
4579 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4581 #define SPEC_in1_r1_32s 0
4583 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4585 o->in1 = tcg_temp_new_i64();
4586 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4588 #define SPEC_in1_r1_32u 0
4590 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4592 o->in1 = tcg_temp_new_i64();
4593 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4595 #define SPEC_in1_r1_sr32 0
4597 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4599 o->in1 = load_reg(get_field(f, r1) + 1);
4601 #define SPEC_in1_r1p1 SPEC_r1_even
4603 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4605 o->in1 = tcg_temp_new_i64();
4606 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4608 #define SPEC_in1_r1p1_32s SPEC_r1_even
4610 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4612 o->in1 = tcg_temp_new_i64();
4613 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4615 #define SPEC_in1_r1p1_32u SPEC_r1_even
4617 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4619 int r1 = get_field(f, r1);
4620 o->in1 = tcg_temp_new_i64();
4621 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4623 #define SPEC_in1_r1_D32 SPEC_r1_even
4625 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4627 o->in1 = load_reg(get_field(f, r2));
4629 #define SPEC_in1_r2 0
4631 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4633 o->in1 = tcg_temp_new_i64();
4634 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4636 #define SPEC_in1_r2_sr32 0
4638 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4640 o->in1 = load_reg(get_field(f, r3));
4642 #define SPEC_in1_r3 0
4644 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4646 o->in1 = regs[get_field(f, r3)];
4647 o->g_in1 = true;
4649 #define SPEC_in1_r3_o 0
4651 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4653 o->in1 = tcg_temp_new_i64();
4654 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4656 #define SPEC_in1_r3_32s 0
4658 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4660 o->in1 = tcg_temp_new_i64();
4661 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4663 #define SPEC_in1_r3_32u 0
4665 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4667 int r3 = get_field(f, r3);
4668 o->in1 = tcg_temp_new_i64();
4669 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4671 #define SPEC_in1_r3_D32 SPEC_r3_even
4673 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4675 o->in1 = load_freg32_i64(get_field(f, r1));
4677 #define SPEC_in1_e1 0
4679 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4681 o->in1 = fregs[get_field(f, r1)];
4682 o->g_in1 = true;
4684 #define SPEC_in1_f1_o 0
4686 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4688 int r1 = get_field(f, r1);
4689 o->out = fregs[r1];
4690 o->out2 = fregs[r1 + 2];
4691 o->g_out = o->g_out2 = true;
4693 #define SPEC_in1_x1_o SPEC_r1_f128
4695 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4697 o->in1 = fregs[get_field(f, r3)];
4698 o->g_in1 = true;
4700 #define SPEC_in1_f3_o 0
4702 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4704 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4706 #define SPEC_in1_la1 0
4708 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4710 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4711 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4713 #define SPEC_in1_la2 0
4715 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4717 in1_la1(s, f, o);
4718 o->in1 = tcg_temp_new_i64();
4719 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4721 #define SPEC_in1_m1_8u 0
4723 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4725 in1_la1(s, f, o);
4726 o->in1 = tcg_temp_new_i64();
4727 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4729 #define SPEC_in1_m1_16s 0
4731 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4733 in1_la1(s, f, o);
4734 o->in1 = tcg_temp_new_i64();
4735 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4737 #define SPEC_in1_m1_16u 0
4739 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4741 in1_la1(s, f, o);
4742 o->in1 = tcg_temp_new_i64();
4743 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4745 #define SPEC_in1_m1_32s 0
4747 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4749 in1_la1(s, f, o);
4750 o->in1 = tcg_temp_new_i64();
4751 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4753 #define SPEC_in1_m1_32u 0
4755 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4757 in1_la1(s, f, o);
4758 o->in1 = tcg_temp_new_i64();
4759 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4761 #define SPEC_in1_m1_64 0
4763 /* ====================================================================== */
4764 /* The "INput 2" generators. These load the second operand to an insn. */
4766 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4768 o->in2 = regs[get_field(f, r1)];
4769 o->g_in2 = true;
4771 #define SPEC_in2_r1_o 0
4773 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4775 o->in2 = tcg_temp_new_i64();
4776 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4778 #define SPEC_in2_r1_16u 0
4780 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4782 o->in2 = tcg_temp_new_i64();
4783 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4785 #define SPEC_in2_r1_32u 0
4787 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4789 int r1 = get_field(f, r1);
4790 o->in2 = tcg_temp_new_i64();
4791 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4793 #define SPEC_in2_r1_D32 SPEC_r1_even
4795 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4797 o->in2 = load_reg(get_field(f, r2));
4799 #define SPEC_in2_r2 0
4801 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4803 o->in2 = regs[get_field(f, r2)];
4804 o->g_in2 = true;
4806 #define SPEC_in2_r2_o 0
4808 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4810 int r2 = get_field(f, r2);
4811 if (r2 != 0) {
4812 o->in2 = load_reg(r2);
4815 #define SPEC_in2_r2_nz 0
4817 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4819 o->in2 = tcg_temp_new_i64();
4820 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4822 #define SPEC_in2_r2_8s 0
4824 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4826 o->in2 = tcg_temp_new_i64();
4827 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4829 #define SPEC_in2_r2_8u 0
4831 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4833 o->in2 = tcg_temp_new_i64();
4834 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4836 #define SPEC_in2_r2_16s 0
4838 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4840 o->in2 = tcg_temp_new_i64();
4841 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4843 #define SPEC_in2_r2_16u 0
4845 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4847 o->in2 = load_reg(get_field(f, r3));
4849 #define SPEC_in2_r3 0
4851 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4853 o->in2 = tcg_temp_new_i64();
4854 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4856 #define SPEC_in2_r3_sr32 0
4858 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4860 o->in2 = tcg_temp_new_i64();
4861 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4863 #define SPEC_in2_r2_32s 0
4865 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4867 o->in2 = tcg_temp_new_i64();
4868 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4870 #define SPEC_in2_r2_32u 0
4872 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4874 o->in2 = tcg_temp_new_i64();
4875 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4877 #define SPEC_in2_r2_sr32 0
4879 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4881 o->in2 = load_freg32_i64(get_field(f, r2));
4883 #define SPEC_in2_e2 0
4885 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4887 o->in2 = fregs[get_field(f, r2)];
4888 o->g_in2 = true;
4890 #define SPEC_in2_f2_o 0
4892 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4894 int r2 = get_field(f, r2);
4895 o->in1 = fregs[r2];
4896 o->in2 = fregs[r2 + 2];
4897 o->g_in1 = o->g_in2 = true;
4899 #define SPEC_in2_x2_o SPEC_r2_f128
4901 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4903 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4905 #define SPEC_in2_ra2 0
4907 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4909 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4910 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4912 #define SPEC_in2_a2 0
4914 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4916 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4918 #define SPEC_in2_ri2 0
4920 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4922 help_l2_shift(s, f, o, 31);
4924 #define SPEC_in2_sh32 0
4926 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4928 help_l2_shift(s, f, o, 63);
4930 #define SPEC_in2_sh64 0
4932 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4934 in2_a2(s, f, o);
4935 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4937 #define SPEC_in2_m2_8u 0
4939 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4941 in2_a2(s, f, o);
4942 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4944 #define SPEC_in2_m2_16s 0
4946 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4948 in2_a2(s, f, o);
4949 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4951 #define SPEC_in2_m2_16u 0
4953 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4955 in2_a2(s, f, o);
4956 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4958 #define SPEC_in2_m2_32s 0
4960 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4962 in2_a2(s, f, o);
4963 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4965 #define SPEC_in2_m2_32u 0
4967 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4969 in2_a2(s, f, o);
4970 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4972 #define SPEC_in2_m2_64 0
4974 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4976 in2_ri2(s, f, o);
4977 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4979 #define SPEC_in2_mri2_16u 0
4981 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4983 in2_ri2(s, f, o);
4984 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4986 #define SPEC_in2_mri2_32s 0
4988 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4990 in2_ri2(s, f, o);
4991 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4993 #define SPEC_in2_mri2_32u 0
4995 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4997 in2_ri2(s, f, o);
4998 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5000 #define SPEC_in2_mri2_64 0
5002 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5004 o->in2 = tcg_const_i64(get_field(f, i2));
5006 #define SPEC_in2_i2 0
5008 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5010 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5012 #define SPEC_in2_i2_8u 0
5014 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5016 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5018 #define SPEC_in2_i2_16u 0
5020 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5022 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5024 #define SPEC_in2_i2_32u 0
5026 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5028 uint64_t i2 = (uint16_t)get_field(f, i2);
5029 o->in2 = tcg_const_i64(i2 << s->insn->data);
5031 #define SPEC_in2_i2_16u_shl 0
5033 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5035 uint64_t i2 = (uint32_t)get_field(f, i2);
5036 o->in2 = tcg_const_i64(i2 << s->insn->data);
5038 #define SPEC_in2_i2_32u_shl 0
5040 #ifndef CONFIG_USER_ONLY
5041 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5043 o->in2 = tcg_const_i64(s->fields->raw_insn);
5045 #define SPEC_in2_insn 0
5046 #endif
5048 /* ====================================================================== */
5050 /* Find opc within the table of insns. This is formulated as a switch
5051 statement so that (1) we get compile-time notice of cut-paste errors
5052 for duplicated opcodes, and (2) the compiler generates the binary
5053 search tree, rather than us having to post-process the table. */
5055 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5056 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5058 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5060 enum DisasInsnEnum {
5061 #include "insn-data.def"
5064 #undef D
5065 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5066 .opc = OPC, \
5067 .fmt = FMT_##FT, \
5068 .fac = FAC_##FC, \
5069 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5070 .name = #NM, \
5071 .help_in1 = in1_##I1, \
5072 .help_in2 = in2_##I2, \
5073 .help_prep = prep_##P, \
5074 .help_wout = wout_##W, \
5075 .help_cout = cout_##CC, \
5076 .help_op = op_##OP, \
5077 .data = D \
5080 /* Allow 0 to be used for NULL in the table below. */
5081 #define in1_0 NULL
5082 #define in2_0 NULL
5083 #define prep_0 NULL
5084 #define wout_0 NULL
5085 #define cout_0 NULL
5086 #define op_0 NULL
5088 #define SPEC_in1_0 0
5089 #define SPEC_in2_0 0
5090 #define SPEC_prep_0 0
5091 #define SPEC_wout_0 0
5093 static const DisasInsn insn_info[] = {
5094 #include "insn-data.def"
5097 #undef D
5098 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5099 case OPC: return &insn_info[insn_ ## NM];
5101 static const DisasInsn *lookup_opc(uint16_t opc)
5103 switch (opc) {
5104 #include "insn-data.def"
5105 default:
5106 return NULL;
5110 #undef D
5111 #undef C
5113 /* Extract a field from the insn. The INSN should be left-aligned in
5114 the uint64_t so that we can more easily utilize the big-bit-endian
5115 definitions we extract from the Principals of Operation. */
5117 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5119 uint32_t r, m;
5121 if (f->size == 0) {
5122 return;
5125 /* Zero extract the field from the insn. */
5126 r = (insn << f->beg) >> (64 - f->size);
5128 /* Sign-extend, or un-swap the field as necessary. */
5129 switch (f->type) {
5130 case 0: /* unsigned */
5131 break;
5132 case 1: /* signed */
5133 assert(f->size <= 32);
5134 m = 1u << (f->size - 1);
5135 r = (r ^ m) - m;
5136 break;
5137 case 2: /* dl+dh split, signed 20 bit. */
5138 r = ((int8_t)r << 12) | (r >> 8);
5139 break;
5140 default:
5141 abort();
5144 /* Validate that the "compressed" encoding we selected above is valid.
5145 I.e. we havn't make two different original fields overlap. */
5146 assert(((o->presentC >> f->indexC) & 1) == 0);
5147 o->presentC |= 1 << f->indexC;
5148 o->presentO |= 1 << f->indexO;
5150 o->c[f->indexC] = r;
5153 /* Lookup the insn at the current PC, extracting the operands into O and
5154 returning the info struct for the insn. Returns NULL for invalid insn. */
5156 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5157 DisasFields *f)
5159 uint64_t insn, pc = s->pc;
5160 int op, op2, ilen;
5161 const DisasInsn *info;
5163 insn = ld_code2(env, pc);
5164 op = (insn >> 8) & 0xff;
5165 ilen = get_ilen(op);
5166 s->next_pc = s->pc + ilen;
5168 switch (ilen) {
5169 case 2:
5170 insn = insn << 48;
5171 break;
5172 case 4:
5173 insn = ld_code4(env, pc) << 32;
5174 break;
5175 case 6:
5176 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5177 break;
5178 default:
5179 abort();
5182 /* We can't actually determine the insn format until we've looked up
5183 the full insn opcode. Which we can't do without locating the
5184 secondary opcode. Assume by default that OP2 is at bit 40; for
5185 those smaller insns that don't actually have a secondary opcode
5186 this will correctly result in OP2 = 0. */
5187 switch (op) {
5188 case 0x01: /* E */
5189 case 0x80: /* S */
5190 case 0x82: /* S */
5191 case 0x93: /* S */
5192 case 0xb2: /* S, RRF, RRE */
5193 case 0xb3: /* RRE, RRD, RRF */
5194 case 0xb9: /* RRE, RRF */
5195 case 0xe5: /* SSE, SIL */
5196 op2 = (insn << 8) >> 56;
5197 break;
5198 case 0xa5: /* RI */
5199 case 0xa7: /* RI */
5200 case 0xc0: /* RIL */
5201 case 0xc2: /* RIL */
5202 case 0xc4: /* RIL */
5203 case 0xc6: /* RIL */
5204 case 0xc8: /* SSF */
5205 case 0xcc: /* RIL */
5206 op2 = (insn << 12) >> 60;
5207 break;
5208 case 0xd0 ... 0xdf: /* SS */
5209 case 0xe1: /* SS */
5210 case 0xe2: /* SS */
5211 case 0xe8: /* SS */
5212 case 0xe9: /* SS */
5213 case 0xea: /* SS */
5214 case 0xee ... 0xf3: /* SS */
5215 case 0xf8 ... 0xfd: /* SS */
5216 op2 = 0;
5217 break;
5218 default:
5219 op2 = (insn << 40) >> 56;
5220 break;
5223 memset(f, 0, sizeof(*f));
5224 f->raw_insn = insn;
5225 f->op = op;
5226 f->op2 = op2;
5228 /* Lookup the instruction. */
5229 info = lookup_opc(op << 8 | op2);
5231 /* If we found it, extract the operands. */
5232 if (info != NULL) {
5233 DisasFormat fmt = info->fmt;
5234 int i;
5236 for (i = 0; i < NUM_C_FIELD; ++i) {
5237 extract_field(f, &format_info[fmt].op[i], insn);
5240 return info;
5243 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5245 const DisasInsn *insn;
5246 ExitStatus ret = NO_EXIT;
5247 DisasFields f;
5248 DisasOps o;
5250 /* Search for the insn in the table. */
5251 insn = extract_insn(env, s, &f);
5253 /* Not found means unimplemented/illegal opcode. */
5254 if (insn == NULL) {
5255 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5256 f.op, f.op2);
5257 gen_illegal_opcode(s);
5258 return EXIT_NORETURN;
5261 #ifndef CONFIG_USER_ONLY
5262 if (s->tb->flags & FLAG_MASK_PER) {
5263 TCGv_i64 addr = tcg_const_i64(s->pc);
5264 gen_helper_per_ifetch(cpu_env, addr);
5265 tcg_temp_free_i64(addr);
5267 #endif
5269 /* Check for insn specification exceptions. */
5270 if (insn->spec) {
5271 int spec = insn->spec, excp = 0, r;
5273 if (spec & SPEC_r1_even) {
5274 r = get_field(&f, r1);
5275 if (r & 1) {
5276 excp = PGM_SPECIFICATION;
5279 if (spec & SPEC_r2_even) {
5280 r = get_field(&f, r2);
5281 if (r & 1) {
5282 excp = PGM_SPECIFICATION;
5285 if (spec & SPEC_r3_even) {
5286 r = get_field(&f, r3);
5287 if (r & 1) {
5288 excp = PGM_SPECIFICATION;
5291 if (spec & SPEC_r1_f128) {
5292 r = get_field(&f, r1);
5293 if (r > 13) {
5294 excp = PGM_SPECIFICATION;
5297 if (spec & SPEC_r2_f128) {
5298 r = get_field(&f, r2);
5299 if (r > 13) {
5300 excp = PGM_SPECIFICATION;
5303 if (excp) {
5304 gen_program_exception(s, excp);
5305 return EXIT_NORETURN;
5309 /* Set up the strutures we use to communicate with the helpers. */
5310 s->insn = insn;
5311 s->fields = &f;
5312 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5313 TCGV_UNUSED_I64(o.out);
5314 TCGV_UNUSED_I64(o.out2);
5315 TCGV_UNUSED_I64(o.in1);
5316 TCGV_UNUSED_I64(o.in2);
5317 TCGV_UNUSED_I64(o.addr1);
5319 /* Implement the instruction. */
5320 if (insn->help_in1) {
5321 insn->help_in1(s, &f, &o);
5323 if (insn->help_in2) {
5324 insn->help_in2(s, &f, &o);
5326 if (insn->help_prep) {
5327 insn->help_prep(s, &f, &o);
5329 if (insn->help_op) {
5330 ret = insn->help_op(s, &o);
5332 if (insn->help_wout) {
5333 insn->help_wout(s, &f, &o);
5335 if (insn->help_cout) {
5336 insn->help_cout(s, &o);
5339 /* Free any temporaries created by the helpers. */
5340 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5341 tcg_temp_free_i64(o.out);
5343 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5344 tcg_temp_free_i64(o.out2);
5346 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5347 tcg_temp_free_i64(o.in1);
5349 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5350 tcg_temp_free_i64(o.in2);
5352 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5353 tcg_temp_free_i64(o.addr1);
5356 #ifndef CONFIG_USER_ONLY
5357 if (s->tb->flags & FLAG_MASK_PER) {
5358 /* An exception might be triggered, save PSW if not already done. */
5359 if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5360 tcg_gen_movi_i64(psw_addr, s->next_pc);
5363 /* Save off cc. */
5364 update_cc_op(s);
5366 /* Call the helper to check for a possible PER exception. */
5367 gen_helper_per_check_exception(cpu_env);
5369 #endif
5371 /* Advance to the next instruction. */
5372 s->pc = s->next_pc;
5373 return ret;
5376 void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
5378 S390CPU *cpu = s390_env_get_cpu(env);
5379 CPUState *cs = CPU(cpu);
5380 DisasContext dc;
5381 target_ulong pc_start;
5382 uint64_t next_page_start;
5383 int num_insns, max_insns;
5384 ExitStatus status;
5385 bool do_debug;
5387 pc_start = tb->pc;
5389 /* 31-bit mode */
5390 if (!(tb->flags & FLAG_MASK_64)) {
5391 pc_start &= 0x7fffffff;
5394 dc.tb = tb;
5395 dc.pc = pc_start;
5396 dc.cc_op = CC_OP_DYNAMIC;
5397 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5399 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5401 num_insns = 0;
5402 max_insns = tb->cflags & CF_COUNT_MASK;
5403 if (max_insns == 0) {
5404 max_insns = CF_COUNT_MASK;
5406 if (max_insns > TCG_MAX_INSNS) {
5407 max_insns = TCG_MAX_INSNS;
5410 gen_tb_start(tb);
5412 do {
5413 tcg_gen_insn_start(dc.pc, dc.cc_op);
5414 num_insns++;
5416 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5417 status = EXIT_PC_STALE;
5418 do_debug = true;
5419 /* The address covered by the breakpoint must be included in
5420 [tb->pc, tb->pc + tb->size) in order to for it to be
5421 properly cleared -- thus we increment the PC here so that
5422 the logic setting tb->size below does the right thing. */
5423 dc.pc += 2;
5424 break;
5427 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5428 gen_io_start();
5431 status = NO_EXIT;
5432 if (status == NO_EXIT) {
5433 status = translate_one(env, &dc);
5436 /* If we reach a page boundary, are single stepping,
5437 or exhaust instruction count, stop generation. */
5438 if (status == NO_EXIT
5439 && (dc.pc >= next_page_start
5440 || tcg_op_buf_full()
5441 || num_insns >= max_insns
5442 || singlestep
5443 || cs->singlestep_enabled)) {
5444 status = EXIT_PC_STALE;
5446 } while (status == NO_EXIT);
5448 if (tb->cflags & CF_LAST_IO) {
5449 gen_io_end();
5452 switch (status) {
5453 case EXIT_GOTO_TB:
5454 case EXIT_NORETURN:
5455 break;
5456 case EXIT_PC_STALE:
5457 update_psw_addr(&dc);
5458 /* FALLTHRU */
5459 case EXIT_PC_UPDATED:
5460 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5461 cc op type is in env */
5462 update_cc_op(&dc);
5463 /* Exit the TB, either by raising a debug exception or by return. */
5464 if (do_debug) {
5465 gen_exception(EXCP_DEBUG);
5466 } else if (use_exit_tb(&dc)) {
5467 tcg_gen_exit_tb(0);
5468 } else {
5469 tcg_gen_lookup_and_goto_ptr(psw_addr);
5471 break;
5472 default:
5473 abort();
5476 gen_tb_end(tb, num_insns);
5478 tb->size = dc.pc - pc_start;
5479 tb->icount = num_insns;
5481 #if defined(S390X_DEBUG_DISAS)
5482 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5483 && qemu_log_in_addr_range(pc_start)) {
5484 qemu_log_lock();
5485 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5486 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5487 qemu_log("\n");
5488 qemu_log_unlock();
5490 #endif
5493 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5494 target_ulong *data)
5496 int cc_op = data[1];
5497 env->psw.addr = data[0];
5498 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5499 env->cc_op = cc_op;