target/s390x: Implement LOAD PROGRAM PARAMETER
[qemu.git] / target / s390x / translate.c
blob2b66a4e66f281ff298a23e511743ae47f24ebb3d
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
48 #include "exec/log.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
56 struct DisasContext {
57 struct TranslationBlock *tb;
58 const DisasInsn *insn;
59 DisasFields *fields;
60 uint64_t pc, next_pc;
61 enum cc_op cc_op;
62 bool singlestep_enabled;
65 /* Information carried about a condition to be evaluated. */
66 typedef struct {
67 TCGCond cond:8;
68 bool is_64;
69 bool g1;
70 bool g2;
71 union {
72 struct { TCGv_i64 a, b; } s64;
73 struct { TCGv_i32 a, b; } s32;
74 } u;
75 } DisasCompare;
77 #define DISAS_EXCP 4
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit[CC_OP_MAX];
81 static uint64_t inline_branch_miss[CC_OP_MAX];
82 #endif
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
91 return pc;
94 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
95 int flags)
97 S390CPU *cpu = S390_CPU(cs);
98 CPUS390XState *env = &cpu->env;
99 int i;
101 if (env->cc_op > 3) {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
103 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
104 } else {
105 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
106 env->psw.mask, env->psw.addr, env->cc_op);
109 for (i = 0; i < 16; i++) {
110 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
111 if ((i % 4) == 3) {
112 cpu_fprintf(f, "\n");
113 } else {
114 cpu_fprintf(f, " ");
118 for (i = 0; i < 16; i++) {
119 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
120 if ((i % 4) == 3) {
121 cpu_fprintf(f, "\n");
122 } else {
123 cpu_fprintf(f, " ");
127 for (i = 0; i < 32; i++) {
128 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
129 env->vregs[i][0].ll, env->vregs[i][1].ll);
130 cpu_fprintf(f, (i % 2) ? "\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i = 0; i < 16; i++) {
135 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
136 if ((i % 4) == 3) {
137 cpu_fprintf(f, "\n");
138 } else {
139 cpu_fprintf(f, " ");
142 #endif
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i = 0; i < CC_OP_MAX; i++) {
146 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
147 inline_branch_miss[i], inline_branch_hit[i]);
149 #endif
151 cpu_fprintf(f, "\n");
154 static TCGv_i64 psw_addr;
155 static TCGv_i64 psw_mask;
156 static TCGv_i64 gbea;
158 static TCGv_i32 cc_op;
159 static TCGv_i64 cc_src;
160 static TCGv_i64 cc_dst;
161 static TCGv_i64 cc_vr;
163 static char cpu_reg_names[32][4];
164 static TCGv_i64 regs[16];
165 static TCGv_i64 fregs[16];
167 void s390x_translate_init(void)
169 int i;
171 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
172 tcg_ctx.tcg_env = cpu_env;
173 psw_addr = tcg_global_mem_new_i64(cpu_env,
174 offsetof(CPUS390XState, psw.addr),
175 "psw_addr");
176 psw_mask = tcg_global_mem_new_i64(cpu_env,
177 offsetof(CPUS390XState, psw.mask),
178 "psw_mask");
179 gbea = tcg_global_mem_new_i64(cpu_env,
180 offsetof(CPUS390XState, gbea),
181 "gbea");
183 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
184 "cc_op");
185 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
186 "cc_src");
187 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
188 "cc_dst");
189 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
190 "cc_vr");
192 for (i = 0; i < 16; i++) {
193 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
194 regs[i] = tcg_global_mem_new(cpu_env,
195 offsetof(CPUS390XState, regs[i]),
196 cpu_reg_names[i]);
199 for (i = 0; i < 16; i++) {
200 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
201 fregs[i] = tcg_global_mem_new(cpu_env,
202 offsetof(CPUS390XState, vregs[i][0].d),
203 cpu_reg_names[i + 16]);
207 static TCGv_i64 load_reg(int reg)
209 TCGv_i64 r = tcg_temp_new_i64();
210 tcg_gen_mov_i64(r, regs[reg]);
211 return r;
214 static TCGv_i64 load_freg32_i64(int reg)
216 TCGv_i64 r = tcg_temp_new_i64();
217 tcg_gen_shri_i64(r, fregs[reg], 32);
218 return r;
221 static void store_reg(int reg, TCGv_i64 v)
223 tcg_gen_mov_i64(regs[reg], v);
226 static void store_freg(int reg, TCGv_i64 v)
228 tcg_gen_mov_i64(fregs[reg], v);
231 static void store_reg32_i64(int reg, TCGv_i64 v)
233 /* 32 bit register writes keep the upper half */
234 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
237 static void store_reg32h_i64(int reg, TCGv_i64 v)
239 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
242 static void store_freg32_i64(int reg, TCGv_i64 v)
244 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
247 static void return_low128(TCGv_i64 dest)
249 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
252 static void update_psw_addr(DisasContext *s)
254 /* psw.addr */
255 tcg_gen_movi_i64(psw_addr, s->pc);
258 static void per_branch(DisasContext *s, bool to_next)
260 #ifndef CONFIG_USER_ONLY
261 tcg_gen_movi_i64(gbea, s->pc);
263 if (s->tb->flags & FLAG_MASK_PER) {
264 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
265 gen_helper_per_branch(cpu_env, gbea, next_pc);
266 if (to_next) {
267 tcg_temp_free_i64(next_pc);
270 #endif
273 static void per_branch_cond(DisasContext *s, TCGCond cond,
274 TCGv_i64 arg1, TCGv_i64 arg2)
276 #ifndef CONFIG_USER_ONLY
277 if (s->tb->flags & FLAG_MASK_PER) {
278 TCGLabel *lab = gen_new_label();
279 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
281 tcg_gen_movi_i64(gbea, s->pc);
282 gen_helper_per_branch(cpu_env, gbea, psw_addr);
284 gen_set_label(lab);
285 } else {
286 TCGv_i64 pc = tcg_const_i64(s->pc);
287 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
288 tcg_temp_free_i64(pc);
290 #endif
293 static void per_breaking_event(DisasContext *s)
295 tcg_gen_movi_i64(gbea, s->pc);
298 static void update_cc_op(DisasContext *s)
300 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
301 tcg_gen_movi_i32(cc_op, s->cc_op);
305 static void potential_page_fault(DisasContext *s)
307 update_psw_addr(s);
308 update_cc_op(s);
311 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
313 return (uint64_t)cpu_lduw_code(env, pc);
316 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
318 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
321 static int get_mem_index(DisasContext *s)
323 switch (s->tb->flags & FLAG_MASK_ASC) {
324 case PSW_ASC_PRIMARY >> 32:
325 return 0;
326 case PSW_ASC_SECONDARY >> 32:
327 return 1;
328 case PSW_ASC_HOME >> 32:
329 return 2;
330 default:
331 tcg_abort();
332 break;
336 static void gen_exception(int excp)
338 TCGv_i32 tmp = tcg_const_i32(excp);
339 gen_helper_exception(cpu_env, tmp);
340 tcg_temp_free_i32(tmp);
343 static void gen_program_exception(DisasContext *s, int code)
345 TCGv_i32 tmp;
347 /* Remember what pgm exeption this was. */
348 tmp = tcg_const_i32(code);
349 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
350 tcg_temp_free_i32(tmp);
352 tmp = tcg_const_i32(s->next_pc - s->pc);
353 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
354 tcg_temp_free_i32(tmp);
356 /* Advance past instruction. */
357 s->pc = s->next_pc;
358 update_psw_addr(s);
360 /* Save off cc. */
361 update_cc_op(s);
363 /* Trigger exception. */
364 gen_exception(EXCP_PGM);
367 static inline void gen_illegal_opcode(DisasContext *s)
369 gen_program_exception(s, PGM_OPERATION);
372 static inline void gen_trap(DisasContext *s)
374 TCGv_i32 t;
376 /* Set DXC to 0xff. */
377 t = tcg_temp_new_i32();
378 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
379 tcg_gen_ori_i32(t, t, 0xff00);
380 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
381 tcg_temp_free_i32(t);
383 gen_program_exception(s, PGM_DATA);
386 #ifndef CONFIG_USER_ONLY
387 static void check_privileged(DisasContext *s)
389 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
390 gen_program_exception(s, PGM_PRIVILEGED);
393 #endif
395 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
397 TCGv_i64 tmp = tcg_temp_new_i64();
398 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
400 /* Note that d2 is limited to 20 bits, signed. If we crop negative
401 displacements early we create larger immedate addends. */
403 /* Note that addi optimizes the imm==0 case. */
404 if (b2 && x2) {
405 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
406 tcg_gen_addi_i64(tmp, tmp, d2);
407 } else if (b2) {
408 tcg_gen_addi_i64(tmp, regs[b2], d2);
409 } else if (x2) {
410 tcg_gen_addi_i64(tmp, regs[x2], d2);
411 } else {
412 if (need_31) {
413 d2 &= 0x7fffffff;
414 need_31 = false;
416 tcg_gen_movi_i64(tmp, d2);
418 if (need_31) {
419 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
422 return tmp;
425 static inline bool live_cc_data(DisasContext *s)
427 return (s->cc_op != CC_OP_DYNAMIC
428 && s->cc_op != CC_OP_STATIC
429 && s->cc_op > 3);
432 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
434 if (live_cc_data(s)) {
435 tcg_gen_discard_i64(cc_src);
436 tcg_gen_discard_i64(cc_dst);
437 tcg_gen_discard_i64(cc_vr);
439 s->cc_op = CC_OP_CONST0 + val;
442 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
444 if (live_cc_data(s)) {
445 tcg_gen_discard_i64(cc_src);
446 tcg_gen_discard_i64(cc_vr);
448 tcg_gen_mov_i64(cc_dst, dst);
449 s->cc_op = op;
452 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
453 TCGv_i64 dst)
455 if (live_cc_data(s)) {
456 tcg_gen_discard_i64(cc_vr);
458 tcg_gen_mov_i64(cc_src, src);
459 tcg_gen_mov_i64(cc_dst, dst);
460 s->cc_op = op;
463 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
464 TCGv_i64 dst, TCGv_i64 vr)
466 tcg_gen_mov_i64(cc_src, src);
467 tcg_gen_mov_i64(cc_dst, dst);
468 tcg_gen_mov_i64(cc_vr, vr);
469 s->cc_op = op;
472 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
474 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
477 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
479 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
482 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
484 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
487 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
489 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
492 /* CC value is in env->cc_op */
493 static void set_cc_static(DisasContext *s)
495 if (live_cc_data(s)) {
496 tcg_gen_discard_i64(cc_src);
497 tcg_gen_discard_i64(cc_dst);
498 tcg_gen_discard_i64(cc_vr);
500 s->cc_op = CC_OP_STATIC;
503 /* calculates cc into cc_op */
504 static void gen_op_calc_cc(DisasContext *s)
506 TCGv_i32 local_cc_op;
507 TCGv_i64 dummy;
509 TCGV_UNUSED_I32(local_cc_op);
510 TCGV_UNUSED_I64(dummy);
511 switch (s->cc_op) {
512 default:
513 dummy = tcg_const_i64(0);
514 /* FALLTHRU */
515 case CC_OP_ADD_64:
516 case CC_OP_ADDU_64:
517 case CC_OP_ADDC_64:
518 case CC_OP_SUB_64:
519 case CC_OP_SUBU_64:
520 case CC_OP_SUBB_64:
521 case CC_OP_ADD_32:
522 case CC_OP_ADDU_32:
523 case CC_OP_ADDC_32:
524 case CC_OP_SUB_32:
525 case CC_OP_SUBU_32:
526 case CC_OP_SUBB_32:
527 local_cc_op = tcg_const_i32(s->cc_op);
528 break;
529 case CC_OP_CONST0:
530 case CC_OP_CONST1:
531 case CC_OP_CONST2:
532 case CC_OP_CONST3:
533 case CC_OP_STATIC:
534 case CC_OP_DYNAMIC:
535 break;
538 switch (s->cc_op) {
539 case CC_OP_CONST0:
540 case CC_OP_CONST1:
541 case CC_OP_CONST2:
542 case CC_OP_CONST3:
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
545 break;
546 case CC_OP_STATIC:
547 /* env->cc_op already is the cc value */
548 break;
549 case CC_OP_NZ:
550 case CC_OP_ABS_64:
551 case CC_OP_NABS_64:
552 case CC_OP_ABS_32:
553 case CC_OP_NABS_32:
554 case CC_OP_LTGT0_32:
555 case CC_OP_LTGT0_64:
556 case CC_OP_COMP_32:
557 case CC_OP_COMP_64:
558 case CC_OP_NZ_F32:
559 case CC_OP_NZ_F64:
560 case CC_OP_FLOGR:
561 /* 1 argument */
562 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
563 break;
564 case CC_OP_ICM:
565 case CC_OP_LTGT_32:
566 case CC_OP_LTGT_64:
567 case CC_OP_LTUGTU_32:
568 case CC_OP_LTUGTU_64:
569 case CC_OP_TM_32:
570 case CC_OP_TM_64:
571 case CC_OP_SLA_32:
572 case CC_OP_SLA_64:
573 case CC_OP_NZ_F128:
574 /* 2 arguments */
575 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
576 break;
577 case CC_OP_ADD_64:
578 case CC_OP_ADDU_64:
579 case CC_OP_ADDC_64:
580 case CC_OP_SUB_64:
581 case CC_OP_SUBU_64:
582 case CC_OP_SUBB_64:
583 case CC_OP_ADD_32:
584 case CC_OP_ADDU_32:
585 case CC_OP_ADDC_32:
586 case CC_OP_SUB_32:
587 case CC_OP_SUBU_32:
588 case CC_OP_SUBB_32:
589 /* 3 arguments */
590 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
591 break;
592 case CC_OP_DYNAMIC:
593 /* unknown operation - assume 3 arguments and cc_op in env */
594 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
595 break;
596 default:
597 tcg_abort();
600 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
601 tcg_temp_free_i32(local_cc_op);
603 if (!TCGV_IS_UNUSED_I64(dummy)) {
604 tcg_temp_free_i64(dummy);
607 /* We now have cc in cc_op as constant */
608 set_cc_static(s);
611 static int use_goto_tb(DisasContext *s, uint64_t dest)
613 if (unlikely(s->singlestep_enabled) ||
614 (s->tb->cflags & CF_LAST_IO) ||
615 (s->tb->flags & FLAG_MASK_PER)) {
616 return false;
618 #ifndef CONFIG_USER_ONLY
619 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
620 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
621 #else
622 return true;
623 #endif
626 static void account_noninline_branch(DisasContext *s, int cc_op)
628 #ifdef DEBUG_INLINE_BRANCHES
629 inline_branch_miss[cc_op]++;
630 #endif
633 static void account_inline_branch(DisasContext *s, int cc_op)
635 #ifdef DEBUG_INLINE_BRANCHES
636 inline_branch_hit[cc_op]++;
637 #endif
640 /* Table of mask values to comparison codes, given a comparison as input.
641 For such, CC=3 should not be possible. */
642 static const TCGCond ltgt_cond[16] = {
643 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
644 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
645 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
646 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
647 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
648 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
649 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
650 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
653 /* Table of mask values to comparison codes, given a logic op as input.
654 For such, only CC=0 and CC=1 should be possible. */
655 static const TCGCond nz_cond[16] = {
656 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
657 TCG_COND_NEVER, TCG_COND_NEVER,
658 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
659 TCG_COND_NE, TCG_COND_NE,
660 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
661 TCG_COND_EQ, TCG_COND_EQ,
662 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
663 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
666 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
667 details required to generate a TCG comparison. */
668 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
670 TCGCond cond;
671 enum cc_op old_cc_op = s->cc_op;
673 if (mask == 15 || mask == 0) {
674 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
675 c->u.s32.a = cc_op;
676 c->u.s32.b = cc_op;
677 c->g1 = c->g2 = true;
678 c->is_64 = false;
679 return;
682 /* Find the TCG condition for the mask + cc op. */
683 switch (old_cc_op) {
684 case CC_OP_LTGT0_32:
685 case CC_OP_LTGT0_64:
686 case CC_OP_LTGT_32:
687 case CC_OP_LTGT_64:
688 cond = ltgt_cond[mask];
689 if (cond == TCG_COND_NEVER) {
690 goto do_dynamic;
692 account_inline_branch(s, old_cc_op);
693 break;
695 case CC_OP_LTUGTU_32:
696 case CC_OP_LTUGTU_64:
697 cond = tcg_unsigned_cond(ltgt_cond[mask]);
698 if (cond == TCG_COND_NEVER) {
699 goto do_dynamic;
701 account_inline_branch(s, old_cc_op);
702 break;
704 case CC_OP_NZ:
705 cond = nz_cond[mask];
706 if (cond == TCG_COND_NEVER) {
707 goto do_dynamic;
709 account_inline_branch(s, old_cc_op);
710 break;
712 case CC_OP_TM_32:
713 case CC_OP_TM_64:
714 switch (mask) {
715 case 8:
716 cond = TCG_COND_EQ;
717 break;
718 case 4 | 2 | 1:
719 cond = TCG_COND_NE;
720 break;
721 default:
722 goto do_dynamic;
724 account_inline_branch(s, old_cc_op);
725 break;
727 case CC_OP_ICM:
728 switch (mask) {
729 case 8:
730 cond = TCG_COND_EQ;
731 break;
732 case 4 | 2 | 1:
733 case 4 | 2:
734 cond = TCG_COND_NE;
735 break;
736 default:
737 goto do_dynamic;
739 account_inline_branch(s, old_cc_op);
740 break;
742 case CC_OP_FLOGR:
743 switch (mask & 0xa) {
744 case 8: /* src == 0 -> no one bit found */
745 cond = TCG_COND_EQ;
746 break;
747 case 2: /* src != 0 -> one bit found */
748 cond = TCG_COND_NE;
749 break;
750 default:
751 goto do_dynamic;
753 account_inline_branch(s, old_cc_op);
754 break;
756 case CC_OP_ADDU_32:
757 case CC_OP_ADDU_64:
758 switch (mask) {
759 case 8 | 2: /* vr == 0 */
760 cond = TCG_COND_EQ;
761 break;
762 case 4 | 1: /* vr != 0 */
763 cond = TCG_COND_NE;
764 break;
765 case 8 | 4: /* no carry -> vr >= src */
766 cond = TCG_COND_GEU;
767 break;
768 case 2 | 1: /* carry -> vr < src */
769 cond = TCG_COND_LTU;
770 break;
771 default:
772 goto do_dynamic;
774 account_inline_branch(s, old_cc_op);
775 break;
777 case CC_OP_SUBU_32:
778 case CC_OP_SUBU_64:
779 /* Note that CC=0 is impossible; treat it as dont-care. */
780 switch (mask & 7) {
781 case 2: /* zero -> op1 == op2 */
782 cond = TCG_COND_EQ;
783 break;
784 case 4 | 1: /* !zero -> op1 != op2 */
785 cond = TCG_COND_NE;
786 break;
787 case 4: /* borrow (!carry) -> op1 < op2 */
788 cond = TCG_COND_LTU;
789 break;
790 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
791 cond = TCG_COND_GEU;
792 break;
793 default:
794 goto do_dynamic;
796 account_inline_branch(s, old_cc_op);
797 break;
799 default:
800 do_dynamic:
801 /* Calculate cc value. */
802 gen_op_calc_cc(s);
803 /* FALLTHRU */
805 case CC_OP_STATIC:
806 /* Jump based on CC. We'll load up the real cond below;
807 the assignment here merely avoids a compiler warning. */
808 account_noninline_branch(s, old_cc_op);
809 old_cc_op = CC_OP_STATIC;
810 cond = TCG_COND_NEVER;
811 break;
814 /* Load up the arguments of the comparison. */
815 c->is_64 = true;
816 c->g1 = c->g2 = false;
817 switch (old_cc_op) {
818 case CC_OP_LTGT0_32:
819 c->is_64 = false;
820 c->u.s32.a = tcg_temp_new_i32();
821 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
822 c->u.s32.b = tcg_const_i32(0);
823 break;
824 case CC_OP_LTGT_32:
825 case CC_OP_LTUGTU_32:
826 case CC_OP_SUBU_32:
827 c->is_64 = false;
828 c->u.s32.a = tcg_temp_new_i32();
829 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
830 c->u.s32.b = tcg_temp_new_i32();
831 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
832 break;
834 case CC_OP_LTGT0_64:
835 case CC_OP_NZ:
836 case CC_OP_FLOGR:
837 c->u.s64.a = cc_dst;
838 c->u.s64.b = tcg_const_i64(0);
839 c->g1 = true;
840 break;
841 case CC_OP_LTGT_64:
842 case CC_OP_LTUGTU_64:
843 case CC_OP_SUBU_64:
844 c->u.s64.a = cc_src;
845 c->u.s64.b = cc_dst;
846 c->g1 = c->g2 = true;
847 break;
849 case CC_OP_TM_32:
850 case CC_OP_TM_64:
851 case CC_OP_ICM:
852 c->u.s64.a = tcg_temp_new_i64();
853 c->u.s64.b = tcg_const_i64(0);
854 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
855 break;
857 case CC_OP_ADDU_32:
858 c->is_64 = false;
859 c->u.s32.a = tcg_temp_new_i32();
860 c->u.s32.b = tcg_temp_new_i32();
861 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
862 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
863 tcg_gen_movi_i32(c->u.s32.b, 0);
864 } else {
865 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
867 break;
869 case CC_OP_ADDU_64:
870 c->u.s64.a = cc_vr;
871 c->g1 = true;
872 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
873 c->u.s64.b = tcg_const_i64(0);
874 } else {
875 c->u.s64.b = cc_src;
876 c->g2 = true;
878 break;
880 case CC_OP_STATIC:
881 c->is_64 = false;
882 c->u.s32.a = cc_op;
883 c->g1 = true;
884 switch (mask) {
885 case 0x8 | 0x4 | 0x2: /* cc != 3 */
886 cond = TCG_COND_NE;
887 c->u.s32.b = tcg_const_i32(3);
888 break;
889 case 0x8 | 0x4 | 0x1: /* cc != 2 */
890 cond = TCG_COND_NE;
891 c->u.s32.b = tcg_const_i32(2);
892 break;
893 case 0x8 | 0x2 | 0x1: /* cc != 1 */
894 cond = TCG_COND_NE;
895 c->u.s32.b = tcg_const_i32(1);
896 break;
897 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
898 cond = TCG_COND_EQ;
899 c->g1 = false;
900 c->u.s32.a = tcg_temp_new_i32();
901 c->u.s32.b = tcg_const_i32(0);
902 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
903 break;
904 case 0x8 | 0x4: /* cc < 2 */
905 cond = TCG_COND_LTU;
906 c->u.s32.b = tcg_const_i32(2);
907 break;
908 case 0x8: /* cc == 0 */
909 cond = TCG_COND_EQ;
910 c->u.s32.b = tcg_const_i32(0);
911 break;
912 case 0x4 | 0x2 | 0x1: /* cc != 0 */
913 cond = TCG_COND_NE;
914 c->u.s32.b = tcg_const_i32(0);
915 break;
916 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
917 cond = TCG_COND_NE;
918 c->g1 = false;
919 c->u.s32.a = tcg_temp_new_i32();
920 c->u.s32.b = tcg_const_i32(0);
921 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
922 break;
923 case 0x4: /* cc == 1 */
924 cond = TCG_COND_EQ;
925 c->u.s32.b = tcg_const_i32(1);
926 break;
927 case 0x2 | 0x1: /* cc > 1 */
928 cond = TCG_COND_GTU;
929 c->u.s32.b = tcg_const_i32(1);
930 break;
931 case 0x2: /* cc == 2 */
932 cond = TCG_COND_EQ;
933 c->u.s32.b = tcg_const_i32(2);
934 break;
935 case 0x1: /* cc == 3 */
936 cond = TCG_COND_EQ;
937 c->u.s32.b = tcg_const_i32(3);
938 break;
939 default:
940 /* CC is masked by something else: (8 >> cc) & mask. */
941 cond = TCG_COND_NE;
942 c->g1 = false;
943 c->u.s32.a = tcg_const_i32(8);
944 c->u.s32.b = tcg_const_i32(0);
945 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
946 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
947 break;
949 break;
951 default:
952 abort();
954 c->cond = cond;
957 static void free_compare(DisasCompare *c)
959 if (!c->g1) {
960 if (c->is_64) {
961 tcg_temp_free_i64(c->u.s64.a);
962 } else {
963 tcg_temp_free_i32(c->u.s32.a);
966 if (!c->g2) {
967 if (c->is_64) {
968 tcg_temp_free_i64(c->u.s64.b);
969 } else {
970 tcg_temp_free_i32(c->u.s32.b);
975 /* ====================================================================== */
976 /* Define the insn format enumeration. */
977 #define F0(N) FMT_##N,
978 #define F1(N, X1) F0(N)
979 #define F2(N, X1, X2) F0(N)
980 #define F3(N, X1, X2, X3) F0(N)
981 #define F4(N, X1, X2, X3, X4) F0(N)
982 #define F5(N, X1, X2, X3, X4, X5) F0(N)
984 typedef enum {
985 #include "insn-format.def"
986 } DisasFormat;
988 #undef F0
989 #undef F1
990 #undef F2
991 #undef F3
992 #undef F4
993 #undef F5
995 /* Define a structure to hold the decoded fields. We'll store each inside
996 an array indexed by an enum. In order to conserve memory, we'll arrange
997 for fields that do not exist at the same time to overlap, thus the "C"
998 for compact. For checking purposes there is an "O" for original index
999 as well that will be applied to availability bitmaps. */
/* Original ("O") field indices, used for availability bitmaps. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact ("C") field indices: fields that never coexist share a slot. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1055 struct DisasFields {
1056 uint64_t raw_insn;
1057 unsigned op:8;
1058 unsigned op2:8;
1059 unsigned presentC:16;
1060 unsigned int presentO;
1061 int c[NUM_C_FIELD];
1064 /* This is the way fields are to be accessed out of DisasFields. */
1065 #define have_field(S, F) have_field1((S), FLD_O_##F)
1066 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1068 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1070 return (f->presentO >> c) & 1;
1073 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1074 enum DisasFieldIndexC c)
1076 assert(have_field1(f, o));
1077 return f->c[c];
1080 /* Describe the layout of each field in each format. */
1081 typedef struct DisasField {
1082 unsigned int beg:8;
1083 unsigned int size:8;
1084 unsigned int type:2;
1085 unsigned int indexC:6;
1086 enum DisasFieldIndexO indexO:8;
1087 } DisasField;
1089 typedef struct DisasFormatInfo {
1090 DisasField op[NUM_C_FIELD];
1091 } DisasFormatInfo;
1093 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1094 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1095 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1096 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1097 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1098 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1099 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1100 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1102 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1105 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1106 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1108 #define F0(N) { { } },
1109 #define F1(N, X1) { { X1 } },
1110 #define F2(N, X1, X2) { { X1, X2 } },
1111 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1112 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1113 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1115 static const DisasFormatInfo format_info[] = {
1116 #include "insn-format.def"
1119 #undef F0
1120 #undef F1
1121 #undef F2
1122 #undef F3
1123 #undef F4
1124 #undef F5
1125 #undef R
1126 #undef M
1127 #undef BD
1128 #undef BXD
1129 #undef BDL
1130 #undef BXDL
1131 #undef I
1132 #undef L
1134 /* Generally, we'll extract operands into this structures, operate upon
1135 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1136 of routines below for more details. */
1137 typedef struct {
1138 bool g_out, g_out2, g_in1, g_in2;
1139 TCGv_i64 out, out2, in1, in2;
1140 TCGv_i64 addr1;
1141 } DisasOps;
1143 /* Instructions can place constraints on their operands, raising specification
1144 exceptions if they are violated. To make this easy to automate, each "in1",
1145 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1146 of the following, or 0. To make this easy to document, we'll put the
1147 SPEC_<name> defines next to <name>. */
1149 #define SPEC_r1_even 1
1150 #define SPEC_r2_even 2
1151 #define SPEC_r3_even 4
1152 #define SPEC_r1_f128 8
1153 #define SPEC_r2_f128 16
/* Return values from translate_one, indicating the state of the TB. */
typedef enum {
    /* Continue the TB. */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
/* Architectural facility an instruction belongs to; used to gate decode. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception sumilation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
    FAC_LPP,                /* load-program-parameter */
} DisasFacility;
1200 struct DisasInsn {
1201 unsigned opc:16;
1202 DisasFormat fmt:8;
1203 DisasFacility fac:8;
1204 unsigned spec:8;
1206 const char *name;
1208 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1209 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1210 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1211 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1212 void (*help_cout)(DisasContext *, DisasOps *);
1213 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1215 uint64_t data;
1218 /* ====================================================================== */
1219 /* Miscellaneous helpers, used by several operations. */
1221 static void help_l2_shift(DisasContext *s, DisasFields *f,
1222 DisasOps *o, int mask)
1224 int b2 = get_field(f, b2);
1225 int d2 = get_field(f, d2);
1227 if (b2 == 0) {
1228 o->in2 = tcg_const_i64(d2 & mask);
1229 } else {
1230 o->in2 = get_address(s, 0, b2, d2);
1231 tcg_gen_andi_i64(o->in2, o->in2, mask);
1235 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1237 if (dest == s->next_pc) {
1238 per_branch(s, true);
1239 return NO_EXIT;
1241 if (use_goto_tb(s, dest)) {
1242 update_cc_op(s);
1243 per_breaking_event(s);
1244 tcg_gen_goto_tb(0);
1245 tcg_gen_movi_i64(psw_addr, dest);
1246 tcg_gen_exit_tb((uintptr_t)s->tb);
1247 return EXIT_GOTO_TB;
1248 } else {
1249 tcg_gen_movi_i64(psw_addr, dest);
1250 per_branch(s, false);
1251 return EXIT_PC_UPDATED;
/* Emit a conditional branch described by compare C.  The target is
   either PC-relative (is_imm, halfword offset IMM) or the register
   value CDEST.  Handles the degenerate never/always cases first, then
   picks the best exit strategy depending on which of the two exits
   (fallthrough / taken) can use a chained goto_tb.  Frees C before
   returning. */
1255 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1256 bool is_imm, int imm, TCGv_i64 cdest)
1258 ExitStatus ret;
1259 uint64_t dest = s->pc + 2 * imm;
1260 TCGLabel *lab;
1262 /* Take care of the special cases first. */
1263 if (c->cond == TCG_COND_NEVER) {
1264 ret = NO_EXIT;
1265 goto egress;
1267 if (is_imm) {
1268 if (dest == s->next_pc) {
1269 /* Branch to next. */
1270 per_branch(s, true);
1271 ret = NO_EXIT;
1272 goto egress;
1274 if (c->cond == TCG_COND_ALWAYS) {
1275 ret = help_goto_direct(s, dest);
1276 goto egress;
1278 } else {
1279 if (TCGV_IS_UNUSED_I64(cdest)) {
1280 /* E.g. bcr %r0 -> no branch. */
1281 ret = NO_EXIT;
1282 goto egress;
1284 if (c->cond == TCG_COND_ALWAYS) {
1285 tcg_gen_mov_i64(psw_addr, cdest);
1286 per_branch(s, false);
1287 ret = EXIT_PC_UPDATED;
1288 goto egress;
/* Truly conditional from here on. */
1292 if (use_goto_tb(s, s->next_pc)) {
1293 if (is_imm && use_goto_tb(s, dest)) {
1294 /* Both exits can use goto_tb. */
1295 update_cc_op(s);
1297 lab = gen_new_label();
1298 if (c->is_64) {
1299 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1300 } else {
1301 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1304 /* Branch not taken. */
1305 tcg_gen_goto_tb(0);
1306 tcg_gen_movi_i64(psw_addr, s->next_pc);
1307 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1309 /* Branch taken. */
1310 gen_set_label(lab);
1311 per_breaking_event(s);
1312 tcg_gen_goto_tb(1);
1313 tcg_gen_movi_i64(psw_addr, dest);
1314 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1316 ret = EXIT_GOTO_TB;
1317 } else {
1318 /* Fallthru can use goto_tb, but taken branch cannot. */
1319 /* Store taken branch destination before the brcond. This
1320 avoids having to allocate a new local temp to hold it.
1321 We'll overwrite this in the not taken case anyway. */
1322 if (!is_imm) {
1323 tcg_gen_mov_i64(psw_addr, cdest);
1326 lab = gen_new_label();
1327 if (c->is_64) {
1328 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1329 } else {
1330 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1333 /* Branch not taken. */
1334 update_cc_op(s);
1335 tcg_gen_goto_tb(0);
1336 tcg_gen_movi_i64(psw_addr, s->next_pc);
1337 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1339 gen_set_label(lab);
1340 if (is_imm) {
1341 tcg_gen_movi_i64(psw_addr, dest);
1343 per_breaking_event(s);
1344 ret = EXIT_PC_UPDATED;
1346 } else {
1347 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1348 Most commonly we're single-stepping or some other condition that
1349 disables all use of goto_tb. Just update the PC and exit. */
1351 TCGv_i64 next = tcg_const_i64(s->next_pc);
1352 if (is_imm) {
1353 cdest = tcg_const_i64(dest);
/* Branch-free selection of the new PSW address via movcond. */
1356 if (c->is_64) {
1357 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1358 cdest, next);
1359 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1360 } else {
1361 TCGv_i32 t0 = tcg_temp_new_i32();
1362 TCGv_i64 t1 = tcg_temp_new_i64();
1363 TCGv_i64 z = tcg_const_i64(0);
1364 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1365 tcg_gen_extu_i32_i64(t1, t0);
1366 tcg_temp_free_i32(t0);
1367 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1368 per_branch_cond(s, TCG_COND_NE, t1, z);
1369 tcg_temp_free_i64(t1);
1370 tcg_temp_free_i64(z);
1373 if (is_imm) {
1374 tcg_temp_free_i64(cdest);
1376 tcg_temp_free_i64(next);
1378 ret = EXIT_PC_UPDATED;
1381 egress:
1382 free_compare(c);
1383 return ret;
1386 /* ====================================================================== */
1387 /* The operations. These perform the bulk of the work for any insn,
1388 usually after the operands have been loaded and output initialized. */
/* Integer absolute value: out = in2 < 0 ? -in2 : in2, via movcond. */
1390 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1392 TCGv_i64 z, n;
1393 z = tcg_const_i64(0);
1394 n = tcg_temp_new_i64();
1395 tcg_gen_neg_i64(n, o->in2);
1396 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1397 tcg_temp_free_i64(n);
1398 tcg_temp_free_i64(z);
1399 return NO_EXIT;
/* Float absolute value = clear the sign bit (single precision). */
1402 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1404 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1405 return NO_EXIT;
/* Float absolute value, double precision. */
1408 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1410 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1411 return NO_EXIT;
/* Float absolute value, extended precision: clear sign in the high
   doubleword, pass the low doubleword through. */
1414 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1416 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1417 tcg_gen_mov_i64(o->out2, o->in2);
1418 return NO_EXIT;
/* Integer add: out = in1 + in2. */
1421 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1423 tcg_gen_add_i64(o->out, o->in1, o->in2);
1424 return NO_EXIT;
/* Add with carry: out = in1 + in2 + carry-from-current-CC. */
1427 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1429 DisasCompare cmp;
1430 TCGv_i64 carry;
1432 tcg_gen_add_i64(o->out, o->in1, o->in2);
1434 /* The carry flag is the msb of CC, therefore the branch mask that would
1435 create that comparison is 3. Feeding the generated comparison to
1436 setcond produces the carry flag that we desire. */
1437 disas_jcc(s, &cmp, 3);
1438 carry = tcg_temp_new_i64();
1439 if (cmp.is_64) {
1440 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1441 } else {
1442 TCGv_i32 t = tcg_temp_new_i32();
1443 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1444 tcg_gen_extu_i32_i64(carry, t);
1445 tcg_temp_free_i32(t);
1447 free_compare(&cmp);
1449 tcg_gen_add_i64(o->out, o->out, carry);
1450 tcg_temp_free_i64(carry);
1451 return NO_EXIT;
/* BFP add, single precision (helper handles rounding/exceptions). */
1454 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1456 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1457 return NO_EXIT;
/* BFP add, double precision. */
1460 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1462 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1463 return NO_EXIT;
/* BFP add, extended precision; low half of the 128-bit result comes
   back through return_low128. */
1466 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1468 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1469 return_low128(o->out2);
1470 return NO_EXIT;
/* Bitwise AND: out = in1 & in2. */
1473 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1475 tcg_gen_and_i64(o->out, o->in1, o->in2);
1476 return NO_EXIT;
/* AND-immediate into a sub-field of the register: s->insn->data packs
   (size << 8 | shift) describing which field of in1 is affected; all
   other bits are preserved by OR-ing ~mask into the shifted immediate.
   CC is computed from the manipulated bits only. */
1479 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1481 int shift = s->insn->data & 0xff;
1482 int size = s->insn->data >> 8;
1483 uint64_t mask = ((1ull << size) - 1) << shift;
1485 assert(!o->g_in2);
1486 tcg_gen_shli_i64(o->in2, o->in2, shift);
1487 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1488 tcg_gen_and_i64(o->out, o->in1, o->in2);
1490 /* Produce the CC from only the bits manipulated. */
1491 tcg_gen_andi_i64(cc_dst, o->out, mask);
1492 set_cc_nz_u64(s, cc_dst);
1493 return NO_EXIT;
/* Branch and save: store link info in out, then branch to in2 unless
   the register operand was r0 (unused -> no branch). */
1496 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1498 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1499 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1500 tcg_gen_mov_i64(psw_addr, o->in2);
1501 per_branch(s, false);
1502 return EXIT_PC_UPDATED;
1503 } else {
1504 return NO_EXIT;
/* Branch relative and save: link then direct PC-relative branch. */
1508 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1510 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1511 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* Branch on condition: build the compare from mask m1 and branch to
   either the relative immediate or the register target in2. */
1514 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1516 int m1 = get_field(s->fields, m1);
1517 bool is_imm = have_field(s->fields, i2);
1518 int imm = is_imm ? get_field(s->fields, i2) : 0;
1519 DisasCompare c;
1521 disas_jcc(s, &c, m1);
1522 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on count, 32-bit: decrement the low word of r1 and branch
   if the result is non-zero. */
1525 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1527 int r1 = get_field(s->fields, r1);
1528 bool is_imm = have_field(s->fields, i2);
1529 int imm = is_imm ? get_field(s->fields, i2) : 0;
1530 DisasCompare c;
1531 TCGv_i64 t;
1533 c.cond = TCG_COND_NE;
1534 c.is_64 = false;
1535 c.g1 = false;
1536 c.g2 = false;
1538 t = tcg_temp_new_i64();
1539 tcg_gen_subi_i64(t, regs[r1], 1);
1540 store_reg32_i64(r1, t);
1541 c.u.s32.a = tcg_temp_new_i32();
1542 c.u.s32.b = tcg_const_i32(0);
1543 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1544 tcg_temp_free_i64(t);
1546 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on count high: like bct32 but operates on the high word of
   r1; target is always the relative immediate. */
1549 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1551 int r1 = get_field(s->fields, r1);
1552 int imm = get_field(s->fields, i2);
1553 DisasCompare c;
1554 TCGv_i64 t;
1556 c.cond = TCG_COND_NE;
1557 c.is_64 = false;
1558 c.g1 = false;
1559 c.g2 = false;
1561 t = tcg_temp_new_i64();
1562 tcg_gen_shri_i64(t, regs[r1], 32);
1563 tcg_gen_subi_i64(t, t, 1);
1564 store_reg32h_i64(r1, t);
1565 c.u.s32.a = tcg_temp_new_i32();
1566 c.u.s32.b = tcg_const_i32(0);
1567 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1568 tcg_temp_free_i64(t);
1570 return help_branch(s, &c, 1, imm, o->in2);
/* Branch on count, 64-bit: decrement r1 in place (global) and branch
   on non-zero. */
1573 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1575 int r1 = get_field(s->fields, r1);
1576 bool is_imm = have_field(s->fields, i2);
1577 int imm = is_imm ? get_field(s->fields, i2) : 0;
1578 DisasCompare c;
1580 c.cond = TCG_COND_NE;
1581 c.is_64 = true;
1582 c.g1 = true;
1583 c.g2 = false;
1585 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1586 c.u.s64.a = regs[r1];
1587 c.u.s64.b = tcg_const_i64(0);
1589 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on index (32-bit): r1 += r3, compare against the odd member
   of the r3 pair; insn->data selects BXLE (<=) vs BXH (>). */
1592 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1594 int r1 = get_field(s->fields, r1);
1595 int r3 = get_field(s->fields, r3);
1596 bool is_imm = have_field(s->fields, i2);
1597 int imm = is_imm ? get_field(s->fields, i2) : 0;
1598 DisasCompare c;
1599 TCGv_i64 t;
1601 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1602 c.is_64 = false;
1603 c.g1 = false;
1604 c.g2 = false;
1606 t = tcg_temp_new_i64();
1607 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1608 c.u.s32.a = tcg_temp_new_i32();
1609 c.u.s32.b = tcg_temp_new_i32();
1610 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1611 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1612 store_reg32_i64(r1, t);
1613 tcg_temp_free_i64(t);
1615 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on index (64-bit).  When r1 aliases the comparand register
   (r1 == r3|1) the comparand is copied first, since the add below
   would clobber it. */
1618 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1620 int r1 = get_field(s->fields, r1);
1621 int r3 = get_field(s->fields, r3);
1622 bool is_imm = have_field(s->fields, i2);
1623 int imm = is_imm ? get_field(s->fields, i2) : 0;
1624 DisasCompare c;
1626 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1627 c.is_64 = true;
1629 if (r1 == (r3 | 1)) {
1630 c.u.s64.b = load_reg(r3 | 1);
1631 c.g2 = false;
1632 } else {
1633 c.u.s64.b = regs[r3 | 1];
1634 c.g2 = true;
1637 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1638 c.u.s64.a = regs[r1];
1639 c.g1 = true;
1641 return help_branch(s, &c, is_imm, imm, o->in2);
/* Compare and branch/trap family: compare in1 with in2 using the m3
   condition (insn->data selects unsigned), then branch to either the
   relative immediate i4 or the b4/d4 address. */
1644 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1646 int imm, m3 = get_field(s->fields, m3);
1647 bool is_imm;
1648 DisasCompare c;
1650 c.cond = ltgt_cond[m3];
1651 if (s->insn->data) {
1652 c.cond = tcg_unsigned_cond(c.cond);
1654 c.is_64 = c.g1 = c.g2 = true;
1655 c.u.s64.a = o->in1;
1656 c.u.s64.b = o->in2;
1658 is_imm = have_field(s->fields, i4);
1659 if (is_imm) {
1660 imm = get_field(s->fields, i4);
1661 } else {
1662 imm = 0;
1663 o->out = get_address(s, 0, get_field(s->fields, b4),
1664 get_field(s->fields, d4));
1667 return help_branch(s, &c, is_imm, imm, o->out);
/* BFP compare, single precision; CC set from the helper result. */
1670 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1672 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1673 set_cc_static(s);
1674 return NO_EXIT;
/* BFP compare, double precision. */
1677 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1679 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1680 set_cc_static(s);
1681 return NO_EXIT;
/* BFP compare, extended precision. */
1684 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1686 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1687 set_cc_static(s);
1688 return NO_EXIT;
/* BFP -> integer conversions.  Naming: c{f,g}{e,d,x}b = convert to
   32/64-bit signed from single/double/extended; the cl* variants are
   the unsigned (logical) forms.  m3 carries the rounding mode; CC is
   derived from the source value after the helper call. */
1691 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1693 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1694 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1695 tcg_temp_free_i32(m3);
1696 gen_set_cc_nz_f32(s, o->in2);
1697 return NO_EXIT;
1700 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1702 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1703 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1704 tcg_temp_free_i32(m3);
1705 gen_set_cc_nz_f64(s, o->in2);
1706 return NO_EXIT;
1709 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1711 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1712 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1713 tcg_temp_free_i32(m3);
1714 gen_set_cc_nz_f128(s, o->in1, o->in2);
1715 return NO_EXIT;
1718 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1720 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1721 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1722 tcg_temp_free_i32(m3);
1723 gen_set_cc_nz_f32(s, o->in2);
1724 return NO_EXIT;
1727 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1729 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1730 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1731 tcg_temp_free_i32(m3);
1732 gen_set_cc_nz_f64(s, o->in2);
1733 return NO_EXIT;
1736 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1738 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1739 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1740 tcg_temp_free_i32(m3);
1741 gen_set_cc_nz_f128(s, o->in1, o->in2);
1742 return NO_EXIT;
/* Unsigned (logical) variants below. */
1745 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1747 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1748 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1749 tcg_temp_free_i32(m3);
1750 gen_set_cc_nz_f32(s, o->in2);
1751 return NO_EXIT;
1754 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1756 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1757 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1758 tcg_temp_free_i32(m3);
1759 gen_set_cc_nz_f64(s, o->in2);
1760 return NO_EXIT;
1763 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1765 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1766 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1767 tcg_temp_free_i32(m3);
1768 gen_set_cc_nz_f128(s, o->in1, o->in2);
1769 return NO_EXIT;
1772 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1774 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1775 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1776 tcg_temp_free_i32(m3);
1777 gen_set_cc_nz_f32(s, o->in2);
1778 return NO_EXIT;
1781 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1783 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1784 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1785 tcg_temp_free_i32(m3);
1786 gen_set_cc_nz_f64(s, o->in2);
1787 return NO_EXIT;
1790 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1792 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1793 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1794 tcg_temp_free_i32(m3);
1795 gen_set_cc_nz_f128(s, o->in1, o->in2);
1796 return NO_EXIT;
/* Integer -> BFP conversions (signed c*gb and unsigned c*lgb forms);
   m3 carries the rounding mode.  The extended-precision results come
   back as a 128-bit pair via return_low128. */
1799 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1801 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1802 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1803 tcg_temp_free_i32(m3);
1804 return NO_EXIT;
1807 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1809 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1810 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1811 tcg_temp_free_i32(m3);
1812 return NO_EXIT;
1815 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1817 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1818 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1819 tcg_temp_free_i32(m3);
1820 return_low128(o->out2);
1821 return NO_EXIT;
1824 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1826 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1827 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1828 tcg_temp_free_i32(m3);
1829 return NO_EXIT;
1832 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1834 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1835 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1836 tcg_temp_free_i32(m3);
1837 return NO_EXIT;
1840 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1842 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1843 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1844 tcg_temp_free_i32(m3);
1845 return_low128(o->out2);
1846 return NO_EXIT;
/* CHECKSUM: helper computes the checksum and the number of bytes
   consumed; advance the r2 address register and shrink the r2+1
   length register by that amount. */
1849 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1851 int r2 = get_field(s->fields, r2);
1852 TCGv_i64 len = tcg_temp_new_i64();
1854 potential_page_fault(s);
1855 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1856 set_cc_static(s);
1857 return_low128(o->out);
1859 tcg_gen_add_i64(regs[r2], regs[r2], len);
1860 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1861 tcg_temp_free_i64(len);
1863 return NO_EXIT;
/* COMPARE LOGICAL (character): for the power-of-two lengths 1/2/4/8
   inline the two loads and compare via CC_OP_LTUGTU_64; any other
   length goes through the byte-wise helper. */
1866 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1868 int l = get_field(s->fields, l1);
1869 TCGv_i32 vl;
1871 switch (l + 1) {
1872 case 1:
1873 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1874 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1875 break;
1876 case 2:
1877 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1878 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1879 break;
1880 case 4:
1881 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1882 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1883 break;
1884 case 8:
1885 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1886 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1887 break;
1888 default:
1889 potential_page_fault(s);
1890 vl = tcg_const_i32(l);
1891 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1892 tcg_temp_free_i32(vl);
1893 set_cc_static(s);
1894 return NO_EXIT;
1896 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1897 return NO_EXIT;
/* COMPARE LOGICAL LONG EXTENDED: full helper call; CC from helper. */
1900 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1902 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1903 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1904 potential_page_fault(s);
1905 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1906 tcg_temp_free_i32(r1);
1907 tcg_temp_free_i32(r3);
1908 set_cc_static(s);
1909 return NO_EXIT;
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the bytes
   of in1 selected by mask m3 against memory at in2. */
1912 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1914 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1915 TCGv_i32 t1 = tcg_temp_new_i32();
1916 tcg_gen_extrl_i64_i32(t1, o->in1);
1917 potential_page_fault(s);
1918 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1919 set_cc_static(s);
1920 tcg_temp_free_i32(t1);
1921 tcg_temp_free_i32(m3);
1922 return NO_EXIT;
/* COMPARE LOGICAL STRING: helper, terminator byte in r0; updated
   addresses come back in in1 and (via low128) in2. */
1925 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1927 potential_page_fault(s);
1928 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1929 set_cc_static(s);
1930 return_low128(o->in2);
1931 return NO_EXIT;
/* COPY SIGN: out = magnitude of in2 with the sign bit of in1. */
1934 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1936 TCGv_i64 t = tcg_temp_new_i64();
1937 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1938 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1939 tcg_gen_or_i64(o->out, o->out, t);
1940 tcg_temp_free_i64(t);
1941 return NO_EXIT;
/* COMPARE AND SWAP (32/64-bit per insn->data): load memory, compare
   with the expected value in in2, conditionally store the new value
   in1, and set CC.  Non-atomic for user-only (see FIXME). */
1944 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1946 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1947 int d2 = get_field(s->fields, d2);
1948 int b2 = get_field(s->fields, b2);
1949 int is_64 = s->insn->data;
1950 TCGv_i64 addr, mem, cc, z;
1952 /* Note that in1 = R3 (new value) and
1953 in2 = (zero-extended) R1 (expected value). */
1955 /* Load the memory into the (temporary) output. While the PoO only talks
1956 about moving the memory to R1 on inequality, if we include equality it
1957 means that R1 is equal to the memory in all conditions. */
1958 addr = get_address(s, 0, b2, d2);
1959 if (is_64) {
1960 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1961 } else {
1962 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1965 /* Are the memory and expected values (un)equal? Note that this setcond
1966 produces the output CC value, thus the NE sense of the test. */
1967 cc = tcg_temp_new_i64();
1968 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1970 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1971 Recall that we are allowed to unconditionally issue the store (and
1972 thus any possible write trap), so (re-)store the original contents
1973 of MEM in case of inequality. */
1974 z = tcg_const_i64(0);
1975 mem = tcg_temp_new_i64();
1976 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1977 if (is_64) {
1978 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1979 } else {
1980 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1982 tcg_temp_free_i64(z);
1983 tcg_temp_free_i64(mem);
1984 tcg_temp_free_i64(addr);
1986 /* Store CC back to cc_op. Wait until after the store so that any
1987 exception gets the old cc_op value. */
1988 tcg_gen_extrl_i64_i32(cc_op, cc);
1989 tcg_temp_free_i64(cc);
1990 set_cc_static(s);
1991 return NO_EXIT;
/* COMPARE DOUBLE AND SWAP (128-bit, register pairs): load both
   doublewords, fold the pair comparison into one setcond via XOR/OR,
   conditionally store the r3 pair, then write back r1 pair and CC
   only after all possible exceptions have passed. */
1994 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1996 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1997 int r1 = get_field(s->fields, r1);
1998 int r3 = get_field(s->fields, r3);
1999 int d2 = get_field(s->fields, d2);
2000 int b2 = get_field(s->fields, b2);
2001 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
2003 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2005 addrh = get_address(s, 0, b2, d2);
2006 addrl = get_address(s, 0, b2, d2 + 8);
2007 outh = tcg_temp_new_i64();
2008 outl = tcg_temp_new_i64();
2010 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
2011 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
2013 /* Fold the double-word compare with arithmetic. */
2014 cc = tcg_temp_new_i64();
2015 z = tcg_temp_new_i64();
2016 tcg_gen_xor_i64(cc, outh, regs[r1]);
2017 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
2018 tcg_gen_or_i64(cc, cc, z);
2019 tcg_gen_movi_i64(z, 0);
2020 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
2022 memh = tcg_temp_new_i64();
2023 meml = tcg_temp_new_i64();
2024 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
2025 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
2026 tcg_temp_free_i64(z);
2028 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
2029 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
2030 tcg_temp_free_i64(memh);
2031 tcg_temp_free_i64(meml);
2032 tcg_temp_free_i64(addrh);
2033 tcg_temp_free_i64(addrl);
2035 /* Save back state now that we've passed all exceptions. */
2036 tcg_gen_mov_i64(regs[r1], outh);
2037 tcg_gen_mov_i64(regs[r1 + 1], outl);
2038 tcg_gen_extrl_i64_i32(cc_op, cc);
2039 tcg_temp_free_i64(outh);
2040 tcg_temp_free_i64(outl);
2041 tcg_temp_free_i64(cc);
2042 set_cc_static(s);
2043 return NO_EXIT;
2046 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; helper does the work and
   sets CC. */
2047 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2049 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2050 check_privileged(s);
2051 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
2052 tcg_temp_free_i32(r1);
2053 set_cc_static(s);
2054 return NO_EXIT;
2056 #endif
/* CONVERT TO DECIMAL: helper converts the low 32 bits of in1 to a
   decimal doubleword, which is stored at address in2. */
2058 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2060 TCGv_i64 t1 = tcg_temp_new_i64();
2061 TCGv_i32 t2 = tcg_temp_new_i32();
2062 tcg_gen_extrl_i64_i32(t2, o->in1)
2063 gen_helper_cvd(t1, t2);
2064 tcg_temp_free_i32(t2);
2065 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2066 tcg_temp_free_i64(t1);
2067 return NO_EXIT;
/* COMPARE AND TRAP: branch past the trap when the inverted condition
   holds, otherwise raise the trap; insn->data selects unsigned. */
2070 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2072 int m3 = get_field(s->fields, m3);
2073 TCGLabel *lab = gen_new_label();
2074 TCGCond c;
2076 c = tcg_invert_cond(ltgt_cond[m3]);
2077 if (s->insn->data) {
2078 c = tcg_unsigned_cond(c);
2080 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2082 /* Trap. */
2083 gen_trap(s);
2085 gen_set_label(lab);
2086 return NO_EXIT;
2089 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypervisor call; PSW address and CC are
   synchronized before entering the helper. */
2090 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2092 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2093 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2094 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2096 check_privileged(s);
2097 update_psw_addr(s);
2098 gen_op_calc_cc(s);
2100 gen_helper_diag(cpu_env, r1, r3, func_code);
2102 tcg_temp_free_i32(func_code);
2103 tcg_temp_free_i32(r3);
2104 tcg_temp_free_i32(r1);
2105 return NO_EXIT;
2107 #endif
/* Integer divides: the helpers return the quotient/remainder pair,
   the second half arriving via return_low128. */
2109 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2111 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2112 return_low128(o->out);
2113 return NO_EXIT;
2116 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2118 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2119 return_low128(o->out);
2120 return NO_EXIT;
2123 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2125 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2126 return_low128(o->out);
2127 return NO_EXIT;
2130 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2132 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2133 return_low128(o->out);
2134 return NO_EXIT;
/* BFP divides, single/double/extended precision. */
2137 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2139 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2140 return NO_EXIT;
2143 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2145 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2146 return NO_EXIT;
2149 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2151 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2152 return_low128(o->out2);
2153 return NO_EXIT;
/* EXTRACT ACCESS REGISTER: out = aregs[r2]. */
2156 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2158 int r2 = get_field(s->fields, r2);
2159 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2160 return NO_EXIT;
/* EXTRACT CPU ATTRIBUTE: we report no cache information (-1). */
2163 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2165 /* No cache information provided. */
2166 tcg_gen_movi_i64(o->out, -1);
2167 return NO_EXIT;
/* EXTRACT FPC: out = the floating-point control register. */
2170 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2172 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2173 return NO_EXIT;
/* EXTRACT PSW: high half of psw_mask to r1, low half to r2 (if not
   r0); written immediately since r1 may equal r2. */
2176 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2178 int r1 = get_field(s->fields, r1);
2179 int r2 = get_field(s->fields, r2);
2180 TCGv_i64 t = tcg_temp_new_i64();
2182 /* Note the "subsequently" in the PoO, which implies a defined result
2183 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2184 tcg_gen_shri_i64(t, psw_mask, 32);
2185 store_reg32_i64(r1, t);
2186 if (r2 != 0) {
2187 store_reg32_i64(r2, psw_mask);
2190 tcg_temp_free_i64(t);
2191 return NO_EXIT;
/* EXECUTE: sync PSW address and CC, then let the helper interpret
   the target instruction with the modification from in1. */
2194 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2196 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2197 tb->flags, (ab)use the tb->cs_base field as the address of
2198 the template in memory, and grab 8 bits of tb->flags/cflags for
2199 the contents of the register. We would then recognize all this
2200 in gen_intermediate_code_internal, generating code for exactly
2201 one instruction. This new TB then gets executed normally.
2203 On the other hand, this seems to be mostly used for modifying
2204 MVC inside of memcpy, which needs a helper call anyway. So
2205 perhaps this doesn't bear thinking about any further. */
2207 TCGv_i64 tmp;
2209 update_psw_addr(s);
2210 gen_op_calc_cc(s);
2212 tmp = tcg_const_i64(s->next_pc);
2213 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2214 tcg_temp_free_i64(tmp);
2216 return NO_EXIT;
/* LOAD FP INTEGER: round to integer in FP format; m3 = rounding
   mode; single/double/extended variants. */
2219 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2221 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2222 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2223 tcg_temp_free_i32(m3);
2224 return NO_EXIT;
2227 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2229 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2230 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2231 tcg_temp_free_i32(m3);
2232 return NO_EXIT;
2235 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2237 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2238 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2239 return_low128(o->out2);
2240 tcg_temp_free_i32(m3);
2241 return NO_EXIT;
/* FIND LEFTMOST ONE: R1 = clz(in) (64 when in == 0), R1+1 = input
   with the found bit cleared; CC computed from the original input. */
2244 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2246 /* We'll use the original input for cc computation, since we get to
2247 compare that against 0, which ought to be better than comparing
2248 the real output against 64. It also lets cc_dst be a convenient
2249 temporary during our computation. */
2250 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2252 /* R1 = IN ? CLZ(IN) : 64. */
2253 tcg_gen_clzi_i64(o->out, o->in2, 64);
2255 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2256 value by 64, which is undefined. But since the shift is 64 iff the
2257 input is zero, we still get the correct result after and'ing. */
2258 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2259 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2260 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2261 return NO_EXIT;
/* INSERT CHARACTERS UNDER MASK: load the bytes selected by m3 from
   memory and deposit them into the register at positions given by
   insn->data (the base bit offset).  Contiguous masks become a single
   wider load; arbitrary masks are done byte by byte.  ccm accumulates
   the mask of inserted bits for the CC computation. */
2264 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2266 int m3 = get_field(s->fields, m3);
2267 int pos, len, base = s->insn->data;
2268 TCGv_i64 tmp = tcg_temp_new_i64();
2269 uint64_t ccm;
2271 switch (m3) {
2272 case 0xf:
2273 /* Effectively a 32-bit load. */
2274 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2275 len = 32;
2276 goto one_insert;
2278 case 0xc:
2279 case 0x6:
2280 case 0x3:
2281 /* Effectively a 16-bit load. */
2282 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2283 len = 16;
2284 goto one_insert;
2286 case 0x8:
2287 case 0x4:
2288 case 0x2:
2289 case 0x1:
2290 /* Effectively an 8-bit load. */
2291 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2292 len = 8;
2293 goto one_insert;
2295 one_insert:
2296 pos = base + ctz32(m3) * 8;
2297 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2298 ccm = ((1ull << len) - 1) << pos;
2299 break;
2301 default:
2302 /* This is going to be a sequence of loads and inserts. */
2303 pos = base + 32 - 8;
2304 ccm = 0;
2305 while (m3) {
2306 if (m3 & 0x8) {
2307 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2308 tcg_gen_addi_i64(o->in2, o->in2, 1);
2309 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2310 ccm |= 0xff << pos;
2312 m3 = (m3 << 1) & 0xf;
2313 pos -= 8;
2315 break;
2318 tcg_gen_movi_i64(tmp, ccm);
2319 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2320 tcg_temp_free_i64(tmp);
2321 return NO_EXIT;
/* Insert immediate: deposit in2 into in1 at the (shift,size) field
   packed in insn->data. */
2324 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2326 int shift = s->insn->data & 0xff;
2327 int size = s->insn->data >> 8;
2328 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2329 return NO_EXIT;
/* INSERT PROGRAM MASK: build bits 24-31 of out from the program mask
   (taken from psw_mask) and the current CC. */
2332 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2334 TCGv_i64 t1;
2336 gen_op_calc_cc(s);
2337 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2339 t1 = tcg_temp_new_i64();
2340 tcg_gen_shli_i64(t1, psw_mask, 20);
2341 tcg_gen_shri_i64(t1, t1, 36);
2342 tcg_gen_or_i64(o->out, o->out, t1);
2344 tcg_gen_extu_i32_i64(t1, cc_op);
2345 tcg_gen_shli_i64(t1, t1, 28);
2346 tcg_gen_or_i64(o->out, o->out, t1);
2347 tcg_temp_free_i64(t1);
2348 return NO_EXIT;
2351 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY: privileged, helper-based. */
2352 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2354 check_privileged(s);
2355 gen_helper_ipte(cpu_env, o->in1, o->in2);
2356 return NO_EXIT;
/* INSERT STORAGE KEY EXTENDED: privileged, helper-based. */
2359 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2361 check_privileged(s);
2362 gen_helper_iske(o->out, cpu_env, o->in2);
2363 return NO_EXIT;
2365 #endif
/* BFP load-and-convert between precisions (lengthen/round); the
   extended-precision results return their low half via low128. */
2367 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2369 gen_helper_ldeb(o->out, cpu_env, o->in2);
2370 return NO_EXIT;
2373 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2375 gen_helper_ledb(o->out, cpu_env, o->in2);
2376 return NO_EXIT;
2379 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2381 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2382 return NO_EXIT;
2385 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2387 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2388 return NO_EXIT;
2391 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2393 gen_helper_lxdb(o->out, cpu_env, o->in2);
2394 return_low128(o->out2);
2395 return NO_EXIT;
2398 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2400 gen_helper_lxeb(o->out, cpu_env, o->in2);
2401 return_low128(o->out2);
2402 return NO_EXIT;
/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits. */
2405 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2407 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2408 return NO_EXIT;
/* Simple memory loads of various widths/signedness into out. */
2411 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2413 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2414 return NO_EXIT;
2417 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2419 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2420 return NO_EXIT;
2423 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2425 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2426 return NO_EXIT;
2429 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2431 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2432 return NO_EXIT;
2435 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2437 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2438 return NO_EXIT;
2441 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2443 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2444 return NO_EXIT;
2447 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2449 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2450 return NO_EXIT;
/* LOAD AND TRAP: o->in2 already holds the loaded 32-bit value; store it to
   r1, then trap if it is zero. */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD AND TRAP (64-bit): load from the address in o->in2 and trap on zero. */
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD HIGH AND TRAP: store the value into the high half of r1, trap on
   zero. */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD LOGICAL AND TRAP: zero-extending 32-bit load, trap on zero. */
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, trap on zero. */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD ON CONDITION: out = (cc matches m3 mask) ? in2 : in1, emitted as a
   movcond on the decoded condition. */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the condition as 0/1, widen it,
           then select with a 64-bit movcond against zero. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; registers r1..r3 loaded in the helper. */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit): privileged; registers r1..r3 loaded in the helper. */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS: privileged; translation and cc produced by the helper. */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PROGRAM PARAMETER: privileged; store the operand into env->pp. */
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}

/* LOAD PSW (short, 8-byte format): privileged; load mask and address words
   from the operand address and install them via the helper. */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* Control flow is now fully redirected by the new PSW. */
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (16-byte format): privileged; load the two doublewords
   and install them via the helper. */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
/* LOAD ACCESS MULTIPLE: access registers r1..r3 loaded in the helper. */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive words at the address in o->in2.  The first and last words are
   loaded first so any access exception is raised before registers are
   clobbered. */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE HIGH: same pattern as op_lm32, but each word goes into the
   high half of the 64-bit register (store_reg32h_i64). */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE (64-bit): load registers r1..r3 (wrapping mod 16) from
   consecutive doublewords.  First and last are loaded into temporaries
   first so an access exception leaves the registers untouched. */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged; access done in the helper. */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit): privileged; access done in the helper. */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* Generic register move: steal in2 as the output, transferring "global"
   ownership so the common epilogue does not free it twice. */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* Move with the extra side effect of setting access register 1 according
   to the current PSW address-space control (used by MVCOS-style insns --
   NOTE(review): exact insn mapping not visible here, confirm in insn-data). */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            /* Copy the access register selected by the base field. */
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}

/* 128-bit move: steal the in1:in2 pair as the out:out2 pair. */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via the helper. */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: register pairs r1/r2 describe the operands; the helper does
   the copy and returns the condition code. */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: as above, with the padding byte in o->in2. */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-space copy; key/length in regs[l1]. */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: privileged cross-space copy; key/length in regs[l1]. */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
/* MOVE PAGE: copy one page; flags are taken from regs[0] by the helper. */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: terminator byte in regs[0]; updated addresses come back in
   in1 and (via the low-128 return slot) in2. */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* MULTIPLY: low 64 bits of the product. */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY LOGICAL: full 128-bit unsigned product in out:out2. */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* BFP multiplies of various widths; arithmetic done in the helpers. */

static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Extended-precision multiply: out:out2 pair times in1:in2 pair. */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* Fused multiply-add/subtract; the third operand is the r3 float reg. */

static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* LOAD NEGATIVE (integer): out = -|in2|, via movcond on the sign. */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

/* LOAD NEGATIVE (float): simply force the sign bit on. */

static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* Extended float: sign lives in the high doubleword (in1). */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (integer). */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (float): flip the sign bit. */

static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* OR (character): memory-to-memory OR of l1+1 bytes; cc from the helper. */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register/immediate forms after operand setup). */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE (OIHH etc.): insn->data encodes the field as size<<8|shift;
   cc reflects only the bits actually operated on. */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* POPULATION COUNT: per-byte popcount computed in the helper. */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; flush done entirely in the helper. */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
#endif
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2 left by
   I5, then insert bits I3..I4 of the result into R1, optionally zeroing the
   remaining bits.  Folded to extract/deposit/and-or sequences depending on
   the computed masks. */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
        tcg_gen_extract_i64(o->out, o->in2, rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask the rotated input, keep the IMASK bits of the
           old value, and merge. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate R2 by
   I5, combine bits I3..I4 into R1 with the boolean op selected by op2, and
   set the cc from the selected bits.  Bit 0x80 of I3 selects test-only. */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK are forced to the boolean op's identity
       element (1 for AND, 0 for OR/XOR) so they pass R1 through unchanged. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Byte-reverse (LOAD/STORE REVERSED family) at 16/32/64-bit widths. */

static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate in 32-bit space, then
   zero-extend the result back to 64 bits. */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; cc from the helper. */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL FAST: privileged. */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
#endif
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): insn->data selects the mode;
   update the BA/EA bits of the PSW mask and end the TB. */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit mode. */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit mode. */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit mode. */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}

/* SET ACCESS: copy the operand into access register r1. */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
/* BFP subtract at short/long/extended width; arithmetic in the helpers. */

static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* Extended subtract: out:out2 pair minus in1:in2 pair. */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* BFP square root at short/long/extended width. */

static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SERVICE CALL: privileged; SCLP processing and cc in the helper. */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged; order handling and cc in the helper. */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* STORE ON CONDITION: store r1 (32 or 64 bits per insn->data) to the
   b2/d2 address only if the cc matches the m3 mask. */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63), which also selects the cc computation. */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT SINGLE LOGICAL. */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: install a new floating-point control register via the helper. */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: as above, with signalling semantics in the helper. */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): deposit the b2+d2 value into the
   appropriate rounding-mode field of the FPC, then reinstall the FPC so
   fpu_status picks up the new mode. */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; deposit the key bits of the
   operand address into the PSW mask. */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged; key update in the helper. */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace bits 0-7 of the PSW mask. */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS: privileged. */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
/* STORE CLOCK: TOD value produced by the helper. */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: store the clock as a 16-byte value. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* SET CLOCK COMPARATOR: privileged. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE CONTROL (64-bit): privileged; registers r1..r3 stored in the helper. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit): privileged; registers r1..r3 stored in the helper. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CPU ID: privileged; pack cpu_num and machine_type into one
   doubleword. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}

/* SET CPU TIMER: privileged. */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST: privileged; list written to low core in the helper. */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STORE CPU TIMER: privileged. */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE SYSTEM INFORMATION: privileged; function code in regs[0]/regs[1]. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX: privileged. */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* XSCH: privileged channel-subsystem op; helper takes r1 (subchannel id)
   and the resulting condition code is set from cc_op. */
3668 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3670 check_privileged(s);
3671 potential_page_fault(s);
3672 gen_helper_xsch(cpu_env, regs[1]);
3673 set_cc_static(s);
3674 return NO_EXIT;
/* CSCH: privileged channel-subsystem op on the subchannel in r1. */
3677 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3679 check_privileged(s);
3680 potential_page_fault(s);
3681 gen_helper_csch(cpu_env, regs[1]);
3682 set_cc_static(s);
3683 return NO_EXIT;
/* HSCH: privileged channel-subsystem op on the subchannel in r1. */
3686 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3688 check_privileged(s);
3689 potential_page_fault(s);
3690 gen_helper_hsch(cpu_env, regs[1]);
3691 set_cc_static(s);
3692 return NO_EXIT;
/* MSCH: privileged; helper takes r1 plus the operand address in o->in2. */
3695 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3697 check_privileged(s);
3698 potential_page_fault(s);
3699 gen_helper_msch(cpu_env, regs[1], o->in2);
3700 set_cc_static(s);
3701 return NO_EXIT;
/* RCHP: privileged channel-path op; channel path id taken from r1. */
3704 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3706 check_privileged(s);
3707 potential_page_fault(s);
3708 gen_helper_rchp(cpu_env, regs[1]);
3709 set_cc_static(s);
3710 return NO_EXIT;
/* RSCH: privileged channel-subsystem op on the subchannel in r1. */
3713 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3715 check_privileged(s);
3716 potential_page_fault(s);
3717 gen_helper_rsch(cpu_env, regs[1]);
3718 set_cc_static(s);
3719 return NO_EXIT;
/* SSCH: privileged; helper takes r1 plus the ORB address in o->in2. */
3722 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3724 check_privileged(s);
3725 potential_page_fault(s);
3726 gen_helper_ssch(cpu_env, regs[1], o->in2);
3727 set_cc_static(s);
3728 return NO_EXIT;
/* STSCH: privileged; helper takes r1 plus the store address in o->in2. */
3731 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3733 check_privileged(s);
3734 potential_page_fault(s);
3735 gen_helper_stsch(cpu_env, regs[1], o->in2);
3736 set_cc_static(s);
3737 return NO_EXIT;
/* TSCH: privileged; helper takes r1 plus the IRB address in o->in2. */
3740 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3742 check_privileged(s);
3743 potential_page_fault(s);
3744 gen_helper_tsch(cpu_env, regs[1], o->in2);
3745 set_cc_static(s);
3746 return NO_EXIT;
/* CHSC: privileged; helper takes the command-block address in o->in2. */
3749 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3751 check_privileged(s);
3752 potential_page_fault(s);
3753 gen_helper_chsc(cpu_env, o->in2);
3754 set_cc_static(s);
3755 return NO_EXIT;
/* STORE PREFIX: privileged; read env->psa and mask it to the architected
   prefix bits (0x7fffe000). */
3758 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3760 check_privileged(s);
3761 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3762 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3763 return NO_EXIT;
/* STNSM/STOSM (op 0xac is STNSM): store the current system mask byte to
   memory first, then AND (0xac) or OR the immediate into the top byte of
   psw_mask. The store-before-modify order matters for fault restart, as
   the comment below explains. */
3766 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3768 uint64_t i2 = get_field(s->fields, i2);
3769 TCGv_i64 t;
3771 check_privileged(s);
3773 /* It is important to do what the instruction name says: STORE THEN.
3774 If we let the output hook perform the store then if we fault and
3775 restart, we'll have the wrong SYSTEM MASK in place. */
3776 t = tcg_temp_new_i64();
3777 tcg_gen_shri_i64(t, psw_mask, 56);
3778 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3779 tcg_temp_free_i64(t);
3781 if (s->fields->op == 0xac) {
3782 tcg_gen_andi_i64(psw_mask, psw_mask,
3783 (i2 << 56) | 0x00ffffffffffffffull);
3784 } else {
3785 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3787 return NO_EXIT;
/* STORE USING REAL ADDRESS (32-bit): privileged; address in o->in2,
   value in o->in1. */
3790 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3792 check_privileged(s);
3793 potential_page_fault(s);
3794 gen_helper_stura(cpu_env, o->in2, o->in1);
3795 return NO_EXIT;
/* STORE USING REAL ADDRESS (64-bit): privileged; same shape as op_stura. */
3798 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3800 check_privileged(s);
3801 potential_page_fault(s);
3802 gen_helper_sturg(cpu_env, o->in2, o->in1);
3803 return NO_EXIT;
3805 #endif
/* STORE FACILITY LIST EXTENDED: non-privileged (outside the USER_ONLY
   guard); helper sets CC and stores at the address in o->in2. */
3807 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
3809 potential_page_fault(s);
3810 gen_helper_stfle(cc_op, cpu_env, o->in2);
3811 set_cc_static(s);
3812 return NO_EXIT;
/* Store the low byte of o->in1 at address o->in2. */
3815 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3817 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3818 return NO_EXIT;
/* Store the low 16 bits of o->in1 at address o->in2. */
3821 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3823 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3824 return NO_EXIT;
/* Store the low 32 bits of o->in1 at address o->in2. */
3827 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3829 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3830 return NO_EXIT;
/* Store all 64 bits of o->in1 at address o->in2. */
3833 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3835 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3836 return NO_EXIT;
/* STORE ACCESS MULTIPLE: helper stores access regs r1..r3 at o->in2. */
3839 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3841 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3842 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3843 potential_page_fault(s);
3844 gen_helper_stam(cpu_env, r1, o->in2, r3);
3845 tcg_temp_free_i32(r1);
3846 tcg_temp_free_i32(r3);
3847 return NO_EXIT;
/* STORE CHARACTERS UNDER MASK: store the bytes of o->in1 selected by the
   m3 mask at o->in2. Contiguous masks that form a natural 32/16/8-bit
   field are fast-pathed as a single store; any other mask falls through
   to a byte-by-byte shift-and-store loop. 'base' (s->insn->data) selects
   whether the high or low word of the register is the source. */
3850 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3852 int m3 = get_field(s->fields, m3);
3853 int pos, base = s->insn->data;
3854 TCGv_i64 tmp = tcg_temp_new_i64();
/* ctz32(m3)*8 is the bit offset of the least-significant selected byte. */
3856 pos = base + ctz32(m3) * 8;
3857 switch (m3) {
3858 case 0xf:
3859 /* Effectively a 32-bit store. */
3860 tcg_gen_shri_i64(tmp, o->in1, pos);
3861 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3862 break;
3864 case 0xc:
3865 case 0x6:
3866 case 0x3:
3867 /* Effectively a 16-bit store. */
3868 tcg_gen_shri_i64(tmp, o->in1, pos);
3869 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3870 break;
3872 case 0x8:
3873 case 0x4:
3874 case 0x2:
3875 case 0x1:
3876 /* Effectively an 8-bit store. */
3877 tcg_gen_shri_i64(tmp, o->in1, pos);
3878 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3879 break;
3881 default:
3882 /* This is going to be a sequence of shifts and stores. */
/* Walk mask bits from the most-significant byte downwards, advancing the
   destination address by one for each stored byte. */
3883 pos = base + 32 - 8;
3884 while (m3) {
3885 if (m3 & 0x8) {
3886 tcg_gen_shri_i64(tmp, o->in1, pos);
3887 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3888 tcg_gen_addi_i64(o->in2, o->in2, 1);
3890 m3 = (m3 << 1) & 0xf;
3891 pos -= 8;
3893 break;
3895 tcg_temp_free_i64(tmp);
3896 return NO_EXIT;
/* STORE MULTIPLE: store regs r1..r3 (wrapping mod 16) at successive
   addresses starting at o->in2. insn->data supplies the element size
   (4 or 8 bytes) and thus selects a 32- or 64-bit store. */
3899 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3901 int r1 = get_field(s->fields, r1);
3902 int r3 = get_field(s->fields, r3);
3903 int size = s->insn->data;
3904 TCGv_i64 tsize = tcg_const_i64(size);
3906 while (1) {
3907 if (size == 8) {
3908 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3909 } else {
3910 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3912 if (r1 == r3) {
3913 break;
3915 tcg_gen_add_i64(o->in2, o->in2, tsize);
3916 r1 = (r1 + 1) & 15;
3919 tcg_temp_free_i64(tsize);
3920 return NO_EXIT;
/* STORE MULTIPLE HIGH: store the high 32 bits of regs r1..r3 (wrapping
   mod 16) at successive word addresses starting at o->in2. The shift by
   32 followed by a 32-bit store picks out the high half (qemu_st32
   stores the low 32 bits of its operand, so shifting left then storing
   low is equivalent to storing the original high word). */
3923 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3925 int r1 = get_field(s->fields, r1);
3926 int r3 = get_field(s->fields, r3);
3927 TCGv_i64 t = tcg_temp_new_i64();
3928 TCGv_i64 t4 = tcg_const_i64(4);
3929 TCGv_i64 t32 = tcg_const_i64(32);
3931 while (1) {
3932 tcg_gen_shl_i64(t, regs[r1], t32);
3933 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3934 if (r1 == r3) {
3935 break;
3937 tcg_gen_add_i64(o->in2, o->in2, t4);
3938 r1 = (r1 + 1) & 15;
3941 tcg_temp_free_i64(t);
3942 tcg_temp_free_i64(t4);
3943 tcg_temp_free_i64(t32);
3944 return NO_EXIT;
/* SEARCH STRING: helper scans for the char in r0; updates o->in1 and
   (via the low128 return) o->in2, and sets CC. */
3947 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3949 potential_page_fault(s);
3950 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3951 set_cc_static(s);
3952 return_low128(o->in2);
3953 return NO_EXIT;
/* SUBTRACT: out = in1 - in2; CC handled by the cout_* hooks. */
3956 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3958 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3959 return NO_EXIT;
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   recovered from the current condition code (the !borrow flag lives in
   the CC msb; asking for CC 0|1 and setcond yields borrow as 0/1). */
3962 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3964 DisasCompare cmp;
3965 TCGv_i64 borrow;
3967 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3969 /* The !borrow flag is the msb of CC. Since we want the inverse of
3970 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3971 disas_jcc(s, &cmp, 8 | 4);
3972 borrow = tcg_temp_new_i64();
3973 if (cmp.is_64) {
3974 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3975 } else {
/* 32-bit comparison path: compute the 0/1 flag in i32 and widen. */
3976 TCGv_i32 t = tcg_temp_new_i32();
3977 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3978 tcg_gen_extu_i32_i64(borrow, t);
3979 tcg_temp_free_i32(t);
3981 free_compare(&cmp);
3983 tcg_gen_sub_i64(o->out, o->out, borrow);
3984 tcg_temp_free_i64(borrow);
3985 return NO_EXIT;
/* SUPERVISOR CALL: record the SVC interruption code and instruction
   length in env, then raise EXCP_SVC. PSW address and cc_op must be
   synced first since the exception exits the TB. */
3988 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3990 TCGv_i32 t;
3992 update_psw_addr(s);
3993 update_cc_op(s);
3995 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3996 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3997 tcg_temp_free_i32(t);
/* ILEN is derived from how far the decoder advanced (next_pc - pc). */
3999 t = tcg_const_i32(s->next_pc - s->pc);
4000 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4001 tcg_temp_free_i32(t);
4003 gen_exception(EXCP_SVC);
4004 return EXIT_NORETURN;
/* TEST DATA CLASS (short BFP): helper sets CC from value and mask. */
4007 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4009 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4010 set_cc_static(s);
4011 return NO_EXIT;
/* TEST DATA CLASS (long BFP): helper sets CC from value and mask. */
4014 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4016 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4017 set_cc_static(s);
4018 return NO_EXIT;
/* TEST DATA CLASS (extended BFP): 128-bit value arrives in out/out2
   (see in1_x1_o), mask in in2; helper sets CC. */
4021 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4023 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4024 set_cc_static(s);
4025 return NO_EXIT;
4028 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION: helper computes CC from the two addresses. */
4029 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4031 potential_page_fault(s);
4032 gen_helper_tprot(cc_op, o->addr1, o->in2);
4033 set_cc_static(s);
4034 return NO_EXIT;
4036 #endif
/* TRANSLATE: helper translates l1+1 bytes at addr1 via the table at in2. */
4038 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4040 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4041 potential_page_fault(s);
4042 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4043 tcg_temp_free_i32(l);
4044 set_cc_static(s);
4045 return NO_EXIT;
/* TRANSLATE EXTENDED: helper updates the out/out2 register pair (out2
   via the low128 return) and sets CC. */
4048 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4050 potential_page_fault(s);
4051 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4052 return_low128(o->out2);
4053 set_cc_static(s);
4054 return NO_EXIT;
/* TRANSLATE AND TEST: helper returns the condition code in cc_op. */
4057 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4059 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4060 potential_page_fault(s);
4061 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4062 tcg_temp_free_i32(l);
4063 set_cc_static(s);
4064 return NO_EXIT;
/* UNPACK: helper does the digit expansion; no CC change here. */
4067 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4069 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4070 potential_page_fault(s);
4071 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4072 tcg_temp_free_i32(l);
4073 return NO_EXIT;
/* EXCLUSIVE OR (character): XC with identical source and destination
   zeroes the field, so for short lengths (l+1 <= 32) that case is
   inlined as a run of zero stores (CC fixed to 0); otherwise defer
   to the xc helper which also computes CC. */
4076 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4078 int d1 = get_field(s->fields, d1);
4079 int d2 = get_field(s->fields, d2);
4080 int b1 = get_field(s->fields, b1);
4081 int b2 = get_field(s->fields, b2);
4082 int l = get_field(s->fields, l1);
4083 TCGv_i32 t32;
4085 o->addr1 = get_address(s, 0, b1, d1);
4087 /* If the addresses are identical, this is a store/memset of zero. */
4088 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4089 o->in2 = tcg_const_i64(0);
/* l holds length-1 per the ISA encoding; bump it to the byte count. */
4091 l++;
4092 while (l >= 8) {
4093 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4094 l -= 8;
4095 if (l > 0) {
4096 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4099 if (l >= 4) {
4100 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4101 l -= 4;
4102 if (l > 0) {
4103 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4106 if (l >= 2) {
4107 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4108 l -= 2;
4109 if (l > 0) {
4110 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4113 if (l) {
4114 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
/* XOR of a value with itself is zero, so CC is always 0 here. */
4116 gen_op_movi_cc(s, 0);
4117 return NO_EXIT;
4120 /* But in general we'll defer to a helper. */
4121 o->in2 = get_address(s, 0, b2, d2);
4122 t32 = tcg_const_i32(l);
4123 potential_page_fault(s);
4124 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4125 tcg_temp_free_i32(t32);
4126 set_cc_static(s);
4127 return NO_EXIT;
/* EXCLUSIVE OR: out = in1 ^ in2; CC handled by the cout_* hooks. */
4130 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4132 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4133 return NO_EXIT;
/* XOR IMMEDIATE against one field of the register: insn->data encodes
   the field as (size << 8) | shift. The immediate was loaded into in2
   unshifted, so align it first, then XOR and derive CC only from the
   bits the instruction actually touched. */
4136 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4138 int shift = s->insn->data & 0xff;
4139 int size = s->insn->data >> 8;
4140 uint64_t mask = ((1ull << size) - 1) << shift;
4142 assert(!o->g_in2);
4143 tcg_gen_shli_i64(o->in2, o->in2, shift);
4144 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4146 /* Produce the CC from only the bits manipulated. */
4147 tcg_gen_andi_i64(cc_dst, o->out, mask);
4148 set_cc_nz_u64(s, cc_dst);
4149 return NO_EXIT;
/* Produce the constant zero in o->out. */
4152 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4154 o->out = tcg_const_i64(0);
4155 return NO_EXIT;
/* Produce zero in both o->out and o->out2; out2 aliases out, and g_out2
   is set so the temp is not freed twice. */
4158 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4160 o->out = tcg_const_i64(0);
4161 o->out2 = o->out;
4162 o->g_out2 = true;
4163 return NO_EXIT;
4166 /* ====================================================================== */
4167 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4168 the original inputs), update the various cc data structures in order to
4169 be able to compute the new condition code. */
/* CC from a 32-bit ABSOLUTE result. */
4171 static void cout_abs32(DisasContext *s, DisasOps *o)
4173 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
/* CC from a 64-bit ABSOLUTE result. */
4176 static void cout_abs64(DisasContext *s, DisasOps *o)
4178 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
/* CC from a signed 32-bit add (needs both inputs and the result). */
4181 static void cout_adds32(DisasContext *s, DisasOps *o)
4183 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
/* CC from a signed 64-bit add. */
4186 static void cout_adds64(DisasContext *s, DisasOps *o)
4188 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
/* CC from an unsigned 32-bit add. */
4191 static void cout_addu32(DisasContext *s, DisasOps *o)
4193 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
/* CC from an unsigned 64-bit add. */
4196 static void cout_addu64(DisasContext *s, DisasOps *o)
4198 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
/* CC from a 32-bit add-with-carry. */
4201 static void cout_addc32(DisasContext *s, DisasOps *o)
4203 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
/* CC from a 64-bit add-with-carry. */
4206 static void cout_addc64(DisasContext *s, DisasOps *o)
4208 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
/* CC from a signed 32-bit compare of in1 vs in2. */
4211 static void cout_cmps32(DisasContext *s, DisasOps *o)
4213 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
/* CC from a signed 64-bit compare. */
4216 static void cout_cmps64(DisasContext *s, DisasOps *o)
4218 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
/* CC from an unsigned 32-bit compare. */
4221 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4223 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
/* CC from an unsigned 64-bit compare. */
4226 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4228 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
/* CC from a 32-bit FP result (nonzero test). */
4231 static void cout_f32(DisasContext *s, DisasOps *o)
4233 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
/* CC from a 64-bit FP result. */
4236 static void cout_f64(DisasContext *s, DisasOps *o)
4238 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
/* CC from a 128-bit FP result held in the out/out2 pair. */
4241 static void cout_f128(DisasContext *s, DisasOps *o)
4243 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
/* CC from a 32-bit NEGATIVE-ABSOLUTE result. */
4246 static void cout_nabs32(DisasContext *s, DisasOps *o)
4248 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
/* CC from a 64-bit NEGATIVE-ABSOLUTE result. */
4251 static void cout_nabs64(DisasContext *s, DisasOps *o)
4253 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
/* CC from a 32-bit COMPLEMENT (negate) result. */
4256 static void cout_neg32(DisasContext *s, DisasOps *o)
4258 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
/* CC from a 64-bit COMPLEMENT (negate) result. */
4261 static void cout_neg64(DisasContext *s, DisasOps *o)
4263 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* CC = nonzero test of the low 32 bits of the result; the zero-extension
   into cc_dst discards the high half first. */
4266 static void cout_nz32(DisasContext *s, DisasOps *o)
4268 tcg_gen_ext32u_i64(cc_dst, o->out);
4269 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
/* CC = nonzero test of the full 64-bit result. */
4272 static void cout_nz64(DisasContext *s, DisasOps *o)
4274 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
/* CC = signed compare of the 32-bit result against zero. */
4277 static void cout_s32(DisasContext *s, DisasOps *o)
4279 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
/* CC = signed compare of the 64-bit result against zero. */
4282 static void cout_s64(DisasContext *s, DisasOps *o)
4284 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
/* CC from a signed 32-bit subtract. */
4287 static void cout_subs32(DisasContext *s, DisasOps *o)
4289 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
/* CC from a signed 64-bit subtract. */
4292 static void cout_subs64(DisasContext *s, DisasOps *o)
4294 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
/* CC from an unsigned 32-bit subtract. */
4297 static void cout_subu32(DisasContext *s, DisasOps *o)
4299 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
/* CC from an unsigned 64-bit subtract. */
4302 static void cout_subu64(DisasContext *s, DisasOps *o)
4304 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
/* CC from a 32-bit subtract-with-borrow. */
4307 static void cout_subb32(DisasContext *s, DisasOps *o)
4309 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
/* CC from a 64-bit subtract-with-borrow. */
4312 static void cout_subb64(DisasContext *s, DisasOps *o)
4314 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
/* CC from a 32-bit TEST UNDER MASK (value in in1, mask in in2). */
4317 static void cout_tm32(DisasContext *s, DisasOps *o)
4319 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
/* CC from a 64-bit TEST UNDER MASK. */
4322 static void cout_tm64(DisasContext *s, DisasOps *o)
4324 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4327 /* ====================================================================== */
4328 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4329 with the TCG register to which we will write. Used in combination with
4330 the "wout" generators, in some cases we need a new temporary, and in
4331 some cases we can write to a TCG global. */
/* Allocate a fresh temp for the result; written back by a wout_* hook. */
4333 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4335 o->out = tcg_temp_new_i64();
4337 #define SPEC_prep_new 0
/* Allocate a fresh temp pair (out/out2) for a 128-bit result. */
4339 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4341 o->out = tcg_temp_new_i64();
4342 o->out2 = tcg_temp_new_i64();
4344 #define SPEC_prep_new_P 0
/* Write directly into gpr r1; g_out marks it global (not to be freed). */
4346 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4348 o->out = regs[get_field(f, r1)];
4349 o->g_out = true;
4351 #define SPEC_prep_r1 0
/* Write into the even/odd gpr pair r1/r1+1; r1 must be even (SPEC). */
4353 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4355 int r1 = get_field(f, r1);
4356 o->out = regs[r1];
4357 o->out2 = regs[r1 + 1];
4358 o->g_out = o->g_out2 = true;
4360 #define SPEC_prep_r1_P SPEC_r1_even
/* Write directly into fpr r1. */
4362 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4364 o->out = fregs[get_field(f, r1)];
4365 o->g_out = true;
4367 #define SPEC_prep_f1 0
/* Write into the 128-bit fpr pair r1/r1+2; r1 must name a valid f128
   register pair (SPEC_r1_f128). */
4369 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4371 int r1 = get_field(f, r1);
4372 o->out = fregs[r1];
4373 o->out2 = fregs[r1 + 2];
4374 o->g_out = o->g_out2 = true;
4376 #define SPEC_prep_x1 SPEC_r1_f128
4378 /* ====================================================================== */
4379 /* The "Write OUTput" generators. These generally perform some non-trivial
4380 copy of data to TCG globals, or to main memory. The trivial cases are
4381 generally handled by having a "prep" generator install the TCG global
4382 as the destination of the operation. */
/* Store the full 64-bit result into gpr r1. */
4384 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4386 store_reg(get_field(f, r1), o->out);
4388 #define SPEC_wout_r1 0
/* Deposit only the low 8 bits of the result into gpr r1. */
4390 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4392 int r1 = get_field(f, r1);
4393 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4395 #define SPEC_wout_r1_8 0
/* Deposit only the low 16 bits of the result into gpr r1. */
4397 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4399 int r1 = get_field(f, r1);
4400 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4402 #define SPEC_wout_r1_16 0
/* Store the result into the low 32 bits of gpr r1. */
4404 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4406 store_reg32_i64(get_field(f, r1), o->out);
4408 #define SPEC_wout_r1_32 0
/* Store the result into the high 32 bits of gpr r1. */
4410 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4412 store_reg32h_i64(get_field(f, r1), o->out)
/* Store out/out2 into the low halves of the even/odd pair r1/r1+1. */
4416 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4418 int r1 = get_field(f, r1);
4419 store_reg32_i64(r1, o->out);
4420 store_reg32_i64(r1 + 1, o->out2);
4422 #define SPEC_wout_r1_P32 SPEC_r1_even
/* Split a 64-bit result across the even/odd pair: low word to r1+1,
   high word to r1. Note this shifts o->out in place. */
4424 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4426 int r1 = get_field(f, r1);
4427 store_reg32_i64(r1 + 1, o->out);
4428 tcg_gen_shri_i64(o->out, o->out, 32);
4429 store_reg32_i64(r1, o->out);
4431 #define SPEC_wout_r1_D32 SPEC_r1_even
/* Store a short (32-bit) FP result into fpr r1. */
4433 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4435 store_freg32_i64(get_field(f, r1), o->out);
4437 #define SPEC_wout_e1 0
/* Store a long (64-bit) FP result into fpr r1. */
4439 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4441 store_freg(get_field(f, r1), o->out);
4443 #define SPEC_wout_f1 0
/* Store a 128-bit FP result into the fpr pair r1/r1+2.
   NOTE(review): this reads the field via s->fields while every sibling
   generator uses the f parameter; presumably f == s->fields here —
   confirm against the dispatcher and unify for consistency. */
4445 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4447 int f1 = get_field(s->fields, r1);
4448 store_freg(f1, o->out);
4449 store_freg(f1 + 2, o->out2);
4451 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditionally store: only write the low 32 bits of r1 when r1 != r2. */
4453 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4455 if (get_field(f, r1) != get_field(f, r2)) {
4456 store_reg32_i64(get_field(f, r1), o->out);
4459 #define SPEC_wout_cond_r1r2_32 0
/* Conditionally store a short FP result: only when r1 != r2. */
4461 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4463 if (get_field(f, r1) != get_field(f, r2)) {
4464 store_freg32_i64(get_field(f, r1), o->out);
4467 #define SPEC_wout_cond_e1e2 0
/* Store the low byte of the result at the first-operand address. */
4469 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4471 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4473 #define SPEC_wout_m1_8 0
/* Store the low 16 bits of the result at the first-operand address. */
4475 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4477 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4479 #define SPEC_wout_m1_16 0
/* Store the low 32 bits of the result at the first-operand address. */
4481 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4483 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4485 #define SPEC_wout_m1_32 0
/* Store all 64 bits of the result at the first-operand address. */
4487 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4489 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4491 #define SPEC_wout_m1_64 0
/* Store the low 32 bits of the result at the second-operand address. */
4493 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4495 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4497 #define SPEC_wout_m2_32 0
/* Atomic-update writeback (32-bit): store the new value at addr1, then
   put the fetched old value (o->in2) into the low half of r1. */
4499 static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4501 /* XXX release reservation */
4502 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4503 store_reg32_i64(get_field(f, r1), o->in2);
4505 #define SPEC_wout_m2_32_r1_atomic 0
/* Atomic-update writeback (64-bit): store the new value at addr1, then
   put the fetched old value into r1. */
4507 static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4509 /* XXX release reservation */
4510 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4511 store_reg(get_field(f, r1), o->in2);
4513 #define SPEC_wout_m2_64_r1_atomic 0
4515 /* ====================================================================== */
4516 /* The "INput 1" generators. These load the first operand to an insn. */
/* in1 = copy of gpr r1. */
4518 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4520 o->in1 = load_reg(get_field(f, r1));
4522 #define SPEC_in1_r1 0
/* in1 = gpr r1 in place (global, no copy — "_o" variants alias). */
4524 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4526 o->in1 = regs[get_field(f, r1)];
4527 o->g_in1 = true;
4529 #define SPEC_in1_r1_o 0
/* in1 = low 32 bits of gpr r1, sign-extended. */
4531 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4533 o->in1 = tcg_temp_new_i64();
4534 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4536 #define SPEC_in1_r1_32s 0
/* in1 = low 32 bits of gpr r1, zero-extended. */
4538 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4540 o->in1 = tcg_temp_new_i64();
4541 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4543 #define SPEC_in1_r1_32u 0
/* in1 = high 32 bits of gpr r1 (shift right 32). */
4545 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4547 o->in1 = tcg_temp_new_i64();
4548 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4550 #define SPEC_in1_r1_sr32 0
/* in1 = copy of gpr r1+1; r1 must be even. */
4552 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4554 o->in1 = load_reg(get_field(f, r1) + 1);
4556 #define SPEC_in1_r1p1 SPEC_r1_even
/* in1 = low 32 bits of gpr r1+1, sign-extended; r1 must be even. */
4558 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4560 o->in1 = tcg_temp_new_i64();
4561 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4563 #define SPEC_in1_r1p1_32s SPEC_r1_even
/* in1 = low 32 bits of gpr r1+1, zero-extended; r1 must be even. */
4565 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4567 o->in1 = tcg_temp_new_i64();
4568 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4570 #define SPEC_in1_r1p1_32u SPEC_r1_even
/* in1 = 64-bit doubleword formed from the pair: r1 high, r1+1 low. */
4572 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4574 int r1 = get_field(f, r1);
4575 o->in1 = tcg_temp_new_i64();
4576 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4578 #define SPEC_in1_r1_D32 SPEC_r1_even
/* in1 = copy of gpr r2. */
4580 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4582 o->in1 = load_reg(get_field(f, r2));
4584 #define SPEC_in1_r2 0
/* in1 = high 32 bits of gpr r2. */
4586 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4588 o->in1 = tcg_temp_new_i64();
4589 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4591 #define SPEC_in1_r2_sr32 0
/* in1 = copy of gpr r3. */
4593 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4595 o->in1 = load_reg(get_field(f, r3));
4597 #define SPEC_in1_r3 0
/* in1 = gpr r3 in place (global, no copy). */
4599 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4601 o->in1 = regs[get_field(f, r3)];
4602 o->g_in1 = true;
4604 #define SPEC_in1_r3_o 0
/* in1 = low 32 bits of gpr r3, sign-extended. */
4606 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4608 o->in1 = tcg_temp_new_i64();
4609 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4611 #define SPEC_in1_r3_32s 0
/* in1 = low 32 bits of gpr r3, zero-extended. */
4613 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4615 o->in1 = tcg_temp_new_i64();
4616 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4618 #define SPEC_in1_r3_32u 0
/* in1 = doubleword formed from the r3/r3+1 pair; r3 must be even. */
4620 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4622 int r3 = get_field(f, r3);
4623 o->in1 = tcg_temp_new_i64();
4624 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4626 #define SPEC_in1_r3_D32 SPEC_r3_even
/* in1 = short (32-bit) FP value from fpr r1. */
4628 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4630 o->in1 = load_freg32_i64(get_field(f, r1));
4632 #define SPEC_in1_e1 0
/* in1 = fpr r1 in place (global, no copy). */
4634 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4636 o->in1 = fregs[get_field(f, r1)];
4637 o->g_in1 = true;
4639 #define SPEC_in1_f1_o 0
/* 128-bit FP first operand from the fpr pair r1/r1+2. Deliberately
   placed in o->out/o->out2 rather than in1/in2 — consumers such as
   op_tcxb read the 128-bit value from the out pair. */
4641 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4643 int r1 = get_field(f, r1);
4644 o->out = fregs[r1];
4645 o->out2 = fregs[r1 + 2];
4646 o->g_out = o->g_out2 = true;
4648 #define SPEC_in1_x1_o SPEC_r1_f128
/* in1 = fpr r3 in place (global, no copy). */
4650 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4652 o->in1 = fregs[get_field(f, r3)];
4653 o->g_in1 = true;
4655 #define SPEC_in1_f3_o 0
/* addr1 = effective address of the first operand (b1 + d1, no index). */
4657 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4659 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4661 #define SPEC_in1_la1 0
/* addr1 = effective address of the SECOND operand (x2 + b2 + d2); used
   when the second-operand address is what the op treats as addr1. */
4663 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4665 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4666 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4668 #define SPEC_in1_la2 0
/* in1 = zero-extended byte loaded from the first-operand address. */
4670 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4672 in1_la1(s, f, o);
4673 o->in1 = tcg_temp_new_i64();
4674 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4676 #define SPEC_in1_m1_8u 0
/* in1 = sign-extended halfword loaded from the first-operand address. */
4678 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4680 in1_la1(s, f, o);
4681 o->in1 = tcg_temp_new_i64();
4682 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4684 #define SPEC_in1_m1_16s 0
/* in1 = zero-extended halfword loaded from the first-operand address. */
4686 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4688 in1_la1(s, f, o);
4689 o->in1 = tcg_temp_new_i64();
4690 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4692 #define SPEC_in1_m1_16u 0
/* in1 = sign-extended word loaded from the first-operand address. */
4694 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4696 in1_la1(s, f, o);
4697 o->in1 = tcg_temp_new_i64();
4698 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4700 #define SPEC_in1_m1_32s 0
/* in1 = zero-extended word loaded from the first-operand address. */
4702 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4704 in1_la1(s, f, o);
4705 o->in1 = tcg_temp_new_i64();
4706 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4708 #define SPEC_in1_m1_32u 0
/* in1 = doubleword loaded from the first-operand address. */
4710 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4712 in1_la1(s, f, o);
4713 o->in1 = tcg_temp_new_i64();
4714 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4716 #define SPEC_in1_m1_64 0
4718 /* ====================================================================== */
4719 /* The "INput 2" generators. These load the second operand to an insn. */
/* in2 = gpr r1 in place (global, no copy). */
4721 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4723 o->in2 = regs[get_field(f, r1)];
4724 o->g_in2 = true;
4726 #define SPEC_in2_r1_o 0
/* in2 = low 16 bits of gpr r1, zero-extended. */
4728 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4730 o->in2 = tcg_temp_new_i64();
4731 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4733 #define SPEC_in2_r1_16u 0
/* in2 = low 32 bits of gpr r1, zero-extended. */
4735 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4737 o->in2 = tcg_temp_new_i64();
4738 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4740 #define SPEC_in2_r1_32u 0
/* in2 = doubleword formed from the r1/r1+1 pair; r1 must be even. */
4742 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4744 int r1 = get_field(f, r1);
4745 o->in2 = tcg_temp_new_i64();
4746 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4748 #define SPEC_in2_r1_D32 SPEC_r1_even
/* in2 = copy of gpr r2. */
4750 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4752 o->in2 = load_reg(get_field(f, r2));
4754 #define SPEC_in2_r2 0
/* in2 = gpr r2 in place (global, no copy). */
4756 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4758 o->in2 = regs[get_field(f, r2)];
4759 o->g_in2 = true;
4761 #define SPEC_in2_r2_o 0
/* in2 = copy of gpr r2, but only when r2 != 0 (r0 means "no operand"
   here; in2 is left unset in that case). */
4763 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4765 int r2 = get_field(f, r2);
4766 if (r2 != 0) {
4767 o->in2 = load_reg(r2);
4770 #define SPEC_in2_r2_nz 0
/* in2 = low 8 bits of gpr r2, sign-extended. */
4772 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4774 o->in2 = tcg_temp_new_i64();
4775 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4777 #define SPEC_in2_r2_8s 0
/* in2 = low 8 bits of gpr r2, zero-extended. */
4779 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4781 o->in2 = tcg_temp_new_i64();
4782 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4784 #define SPEC_in2_r2_8u 0
/* in2 = low 16 bits of gpr r2, sign-extended. */
4786 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4788 o->in2 = tcg_temp_new_i64();
4789 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4791 #define SPEC_in2_r2_16s 0
/* in2 = low 16 bits of gpr r2, zero-extended. */
4793 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4795 o->in2 = tcg_temp_new_i64();
4796 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4798 #define SPEC_in2_r2_16u 0
/* in2 = copy of gpr r3. */
4800 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4802 o->in2 = load_reg(get_field(f, r3));
4804 #define SPEC_in2_r3 0
/* in2 = high 32 bits of gpr r3. */
4806 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4808 o->in2 = tcg_temp_new_i64();
4809 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4811 #define SPEC_in2_r3_sr32 0
/* in2 = low 32 bits of gpr r2, sign-extended. */
4813 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4815 o->in2 = tcg_temp_new_i64();
4816 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4818 #define SPEC_in2_r2_32s 0
/* in2 = low 32 bits of gpr r2, zero-extended. */
4820 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4822 o->in2 = tcg_temp_new_i64();
4823 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4825 #define SPEC_in2_r2_32u 0
/* in2 = high 32 bits of gpr r2. */
4827 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4829 o->in2 = tcg_temp_new_i64();
4830 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4832 #define SPEC_in2_r2_sr32 0
/* in2 = short (32-bit) FP value from fpr r2. */
4834 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4836 o->in2 = load_freg32_i64(get_field(f, r2));
4838 #define SPEC_in2_e2 0
/* in2 = fpr r2 in place (global, no copy). */
4840 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4842 o->in2 = fregs[get_field(f, r2)];
4843 o->g_in2 = true;
4845 #define SPEC_in2_f2_o 0
/* 128-bit FP second operand: in1/in2 alias the fpr pair r2/r2+2. */
4847 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4849 int r2 = get_field(f, r2);
4850 o->in1 = fregs[r2];
4851 o->in2 = fregs[r2 + 2];
4852 o->g_in1 = o->g_in2 = true;
4854 #define SPEC_in2_x2_o SPEC_r2_f128
/* in2 = address taken from gpr r2 (register-as-address, no disp/index). */
4856 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4858 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4860 #define SPEC_in2_ra2 0
/* in2 = effective address of the second operand (x2 + b2 + d2). */
4862 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4864 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4865 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4867 #define SPEC_in2_a2 0
/* in2 = PC-relative address: current pc plus the halfword-scaled i2. */
4869 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4871 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4873 #define SPEC_in2_ri2 0
/* in2 = shift amount masked to 0..31 (help_l2_shift handles it). */
4875 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4877 help_l2_shift(s, f, o, 31);
4879 #define SPEC_in2_sh32 0
/* in2 = shift amount masked to 0..63. */
4881 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4883 help_l2_shift(s, f, o, 63);
4885 #define SPEC_in2_sh64 0
/* in2 = zero-extended byte loaded from the second-operand address
   (the address temp in in2 is overwritten with the loaded value). */
4887 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4889 in2_a2(s, f, o);
4890 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4892 #define SPEC_in2_m2_8u 0
4894 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4896 in2_a2(s, f, o);
4897 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4899 #define SPEC_in2_m2_16s 0
4901 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4903 in2_a2(s, f, o);
4904 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4906 #define SPEC_in2_m2_16u 0
4908 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4910 in2_a2(s, f, o);
4911 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4913 #define SPEC_in2_m2_32s 0
4915 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4917 in2_a2(s, f, o);
4918 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4920 #define SPEC_in2_m2_32u 0
4922 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4924 in2_a2(s, f, o);
4925 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4927 #define SPEC_in2_m2_64 0
4929 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4931 in2_ri2(s, f, o);
4932 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4934 #define SPEC_in2_mri2_16u 0
4936 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4938 in2_ri2(s, f, o);
4939 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4941 #define SPEC_in2_mri2_32s 0
4943 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4945 in2_ri2(s, f, o);
4946 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4948 #define SPEC_in2_mri2_32u 0
4950 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4952 in2_ri2(s, f, o);
4953 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4955 #define SPEC_in2_mri2_64 0
4957 static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4959 /* XXX should reserve the address */
4960 in1_la2(s, f, o);
4961 o->in2 = tcg_temp_new_i64();
4962 tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4964 #define SPEC_in2_m2_32s_atomic 0
4966 static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4968 /* XXX should reserve the address */
4969 in1_la2(s, f, o);
4970 o->in2 = tcg_temp_new_i64();
4971 tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4973 #define SPEC_in2_m2_64_atomic 0
4975 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4977 o->in2 = tcg_const_i64(get_field(f, i2));
4979 #define SPEC_in2_i2 0
4981 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4983 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4985 #define SPEC_in2_i2_8u 0
4987 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4989 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4991 #define SPEC_in2_i2_16u 0
4993 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4995 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4997 #define SPEC_in2_i2_32u 0
4999 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5001 uint64_t i2 = (uint16_t)get_field(f, i2);
5002 o->in2 = tcg_const_i64(i2 << s->insn->data);
5004 #define SPEC_in2_i2_16u_shl 0
5006 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5008 uint64_t i2 = (uint32_t)get_field(f, i2);
5009 o->in2 = tcg_const_i64(i2 << s->insn->data);
5011 #define SPEC_in2_i2_32u_shl 0
5013 #ifndef CONFIG_USER_ONLY
5014 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5016 o->in2 = tcg_const_i64(s->fields->raw_insn);
5018 #define SPEC_in2_insn 0
5019 #endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C is the 10-argument form of a table entry; it forwards to D with a
   zero "data" argument.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: each entry becomes an enumerator,
   giving every insn a stable index into insn_info[] below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: each entry becomes a DisasInsn initializer wiring
   up the format, facility, specification flags, and helper callbacks.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: each entry becomes a switch case mapping the
   combined opcode to the matching insn_info[] element (see
   lookup_opc below).  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];
/* Map a combined opcode (op << 8 | op2) to its DisasInsn descriptor.
   The case labels are generated by expanding insn-data.def through the
   D macro defined just above.  Returns NULL for an opcode we do not
   implement.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-size field marks an unused slot in the format table.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn. */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary. */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic sign-extension trick: xor with the sign bit, then
           subtract it.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble the 20-bit displacement: dh (signed high 8 bits)
           goes above dl (low 12 bits).  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap. */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;
    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first halfword is enough to determine the insn length on
       s390x (top two bits of the first opcode byte).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Re-read the full insn and left-align it in the 64-bit word, as
       extract_field expects.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Decode and translate the single instruction at s->pc.  Drives the
   per-insn helper pipeline (in1 -> in2 -> prep -> op -> wout -> cout),
   then frees the operand temporaries.  Returns the continuation status
   and advances s->pc to the next insn.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* PER tracing: report the address of the insn being fetched.  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Register-pair operands must name an even register.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit float operands must name a valid register pair
           (0, 1, 4, 5, 8, 9, 12, 13 -- i.e. base <= 13).  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  Operands flagged
       as globals (g_*) alias CPU state and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc. */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
5349 void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
5351 S390CPU *cpu = s390_env_get_cpu(env);
5352 CPUState *cs = CPU(cpu);
5353 DisasContext dc;
5354 target_ulong pc_start;
5355 uint64_t next_page_start;
5356 int num_insns, max_insns;
5357 ExitStatus status;
5358 bool do_debug;
5360 pc_start = tb->pc;
5362 /* 31-bit mode */
5363 if (!(tb->flags & FLAG_MASK_64)) {
5364 pc_start &= 0x7fffffff;
5367 dc.tb = tb;
5368 dc.pc = pc_start;
5369 dc.cc_op = CC_OP_DYNAMIC;
5370 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5372 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5374 num_insns = 0;
5375 max_insns = tb->cflags & CF_COUNT_MASK;
5376 if (max_insns == 0) {
5377 max_insns = CF_COUNT_MASK;
5379 if (max_insns > TCG_MAX_INSNS) {
5380 max_insns = TCG_MAX_INSNS;
5383 gen_tb_start(tb);
5385 do {
5386 tcg_gen_insn_start(dc.pc, dc.cc_op);
5387 num_insns++;
5389 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5390 status = EXIT_PC_STALE;
5391 do_debug = true;
5392 /* The address covered by the breakpoint must be included in
5393 [tb->pc, tb->pc + tb->size) in order to for it to be
5394 properly cleared -- thus we increment the PC here so that
5395 the logic setting tb->size below does the right thing. */
5396 dc.pc += 2;
5397 break;
5400 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5401 gen_io_start();
5404 status = NO_EXIT;
5405 if (status == NO_EXIT) {
5406 status = translate_one(env, &dc);
5409 /* If we reach a page boundary, are single stepping,
5410 or exhaust instruction count, stop generation. */
5411 if (status == NO_EXIT
5412 && (dc.pc >= next_page_start
5413 || tcg_op_buf_full()
5414 || num_insns >= max_insns
5415 || singlestep
5416 || cs->singlestep_enabled)) {
5417 status = EXIT_PC_STALE;
5419 } while (status == NO_EXIT);
5421 if (tb->cflags & CF_LAST_IO) {
5422 gen_io_end();
5425 switch (status) {
5426 case EXIT_GOTO_TB:
5427 case EXIT_NORETURN:
5428 break;
5429 case EXIT_PC_STALE:
5430 update_psw_addr(&dc);
5431 /* FALLTHRU */
5432 case EXIT_PC_UPDATED:
5433 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5434 cc op type is in env */
5435 update_cc_op(&dc);
5436 /* Exit the TB, either by raising a debug exception or by return. */
5437 if (do_debug) {
5438 gen_exception(EXCP_DEBUG);
5439 } else {
5440 tcg_gen_exit_tb(0);
5442 break;
5443 default:
5444 abort();
5447 gen_tb_end(tb, num_insns);
5449 tb->size = dc.pc - pc_start;
5450 tb->icount = num_insns;
5452 #if defined(S390X_DEBUG_DISAS)
5453 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5454 && qemu_log_in_addr_range(pc_start)) {
5455 qemu_log_lock();
5456 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5457 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5458 qemu_log("\n");
5459 qemu_log_unlock();
5461 #endif
5464 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5465 target_ulong *data)
5467 int cc_op = data[1];
5468 env->psw.addr = data[0];
5469 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5470 env->cc_op = cc_op;