target/s390x: implement MOVE INVERSE
[qemu/ar7.git] / target / s390x / translate.c
bloba9b96e7e9954bb5d7db4f47f4680dd4b1effd3c7
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* Debug knobs: DEBUG_INLINE_BRANCHES counts inline vs. helper-based branch
   condition evaluation; S390X_DEBUG_DISAS_VERBOSE routes LOG_DISAS to the
   qemu log, otherwise LOG_DISAS compiles away to nothing. */
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
48 #include "exec/log.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
/* Per-translation-block decoder state, threaded through every insn helper.
   NOTE(review): brace-only lines were lost in this extraction; the closing
   "};" of this struct is missing here — restore before compiling. */
56 struct DisasContext {
57 struct TranslationBlock *tb;
58 const DisasInsn *insn;          /* descriptor of the insn being decoded */
59 DisasFields *fields;            /* decoded operand fields of that insn */
60 uint64_t ex_value;
61 uint64_t pc, next_pc;           /* current insn address / next insn address */
62 uint32_t ilen;                  /* current instruction length */
63 enum cc_op cc_op;               /* lazily-tracked condition-code state */
64 bool singlestep_enabled;
67 /* Information carried about a condition to be evaluated. */
68 typedef struct {
69 TCGCond cond:8;
70 bool is_64;                     /* selects u.s64 vs u.s32 below */
71 bool g1;                        /* a is a global TCG value: don't free it */
72 bool g2;                        /* b is a global TCG value: don't free it */
73 union {
74 struct { TCGv_i64 a, b; } s64;
75 struct { TCGv_i32 a, b; } s32;
76 } u;
77 } DisasCompare;
79 #define DISAS_EXCP 4
/* Hit/miss counters per cc_op: "hit" = the branch condition was folded
   into an inline TCG comparison, "miss" = the generic helper was needed.
   Dumped by s390_cpu_dump_state when DEBUG_INLINE_BRANCHES is defined. */
81 #ifdef DEBUG_INLINE_BRANCHES
82 static uint64_t inline_branch_hit[CC_OP_MAX];
83 static uint64_t inline_branch_miss[CC_OP_MAX];
84 #endif
/* Compute the link value for branch-and-link: in 64-bit mode the raw PC;
   otherwise, with the 31-bit flag set, the PC with bit 32 (the
   addressing-mode bit) ORed in.
   NOTE(review): brace-only lines lost in extraction — bodies below are
   missing their '{' / '}' lines. */
86 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 if (!(s->tb->flags & FLAG_MASK_64)) {
89 if (s->tb->flags & FLAG_MASK_32) {
90 return pc | 0x80000000;
93 return pc;
/* Dump CPU state for 'info registers' / debug logging: PSW (with symbolic
   cc name when cc_op is non-constant), 16 GPRs, 16 FPRs, 32 vector regs,
   and (system mode only) 16 control regs, four per output line. */
96 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
97 int flags)
99 S390CPU *cpu = S390_CPU(cs);
100 CPUS390XState *env = &cpu->env;
101 int i;
103 if (env->cc_op > 3) {
104 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
105 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
106 } else {
107 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
108 env->psw.mask, env->psw.addr, env->cc_op);
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
120 for (i = 0; i < 16; i++) {
121 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
122 if ((i % 4) == 3) {
123 cpu_fprintf(f, "\n");
124 } else {
125 cpu_fprintf(f, " ");
129 for (i = 0; i < 32; i++) {
130 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
131 env->vregs[i][0].ll, env->vregs[i][1].ll);
132 cpu_fprintf(f, (i % 2) ? "\n" : " ");
135 #ifndef CONFIG_USER_ONLY
136 for (i = 0; i < 16; i++) {
137 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
138 if ((i % 4) == 3) {
139 cpu_fprintf(f, "\n");
140 } else {
141 cpu_fprintf(f, " ");
144 #endif
/* Inline-branch statistics, only when compiled with DEBUG_INLINE_BRANCHES. */
146 #ifdef DEBUG_INLINE_BRANCHES
147 for (i = 0; i < CC_OP_MAX; i++) {
148 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
149 inline_branch_miss[i], inline_branch_hit[i]);
151 #endif
153 cpu_fprintf(f, "\n");
/* TCG global handles, bound to CPUS390XState fields in s390x_translate_init. */
156 static TCGv_i64 psw_addr;
157 static TCGv_i64 psw_mask;
158 static TCGv_i64 gbea;        /* breaking-event address (PER); see per_breaking_event */
/* Lazy condition-code state: cc_op names the pending operation, and
   cc_src/cc_dst/cc_vr hold its operands until the cc is materialized. */
160 static TCGv_i32 cc_op;
161 static TCGv_i64 cc_src;
162 static TCGv_i64 cc_dst;
163 static TCGv_i64 cc_vr;
/* 32 names: "r0".."r15" in [0..15], "f0".."f15" in [16..31]. */
165 static char cpu_reg_names[32][4];
166 static TCGv_i64 regs[16];
167 static TCGv_i64 fregs[16];
/* One-time TCG setup: create the env pointer and bind every global above
   to its offset inside CPUS390XState.  Note the FPRs alias vregs[i][0],
   i.e. the first doubleword of each vector register. */
169 void s390x_translate_init(void)
171 int i;
173 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
174 tcg_ctx.tcg_env = cpu_env;
175 psw_addr = tcg_global_mem_new_i64(cpu_env,
176 offsetof(CPUS390XState, psw.addr),
177 "psw_addr");
178 psw_mask = tcg_global_mem_new_i64(cpu_env,
179 offsetof(CPUS390XState, psw.mask),
180 "psw_mask");
181 gbea = tcg_global_mem_new_i64(cpu_env,
182 offsetof(CPUS390XState, gbea),
183 "gbea");
185 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
186 "cc_op");
187 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
188 "cc_src");
189 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
190 "cc_dst");
191 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
192 "cc_vr");
194 for (i = 0; i < 16; i++) {
195 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
196 regs[i] = tcg_global_mem_new(cpu_env,
197 offsetof(CPUS390XState, regs[i]),
198 cpu_reg_names[i]);
201 for (i = 0; i < 16; i++) {
202 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
203 fregs[i] = tcg_global_mem_new(cpu_env,
204 offsetof(CPUS390XState, vregs[i][0].d),
205 cpu_reg_names[i + 16]);
209 static TCGv_i64 load_reg(int reg)
211 TCGv_i64 r = tcg_temp_new_i64();
212 tcg_gen_mov_i64(r, regs[reg]);
213 return r;
216 static TCGv_i64 load_freg32_i64(int reg)
218 TCGv_i64 r = tcg_temp_new_i64();
219 tcg_gen_shri_i64(r, fregs[reg], 32);
220 return r;
/* Copy @v into the full 64-bit GPR @reg. */
223 static void store_reg(int reg, TCGv_i64 v)
225 tcg_gen_mov_i64(regs[reg], v);
/* Copy @v into the full 64-bit FPR @reg. */
228 static void store_freg(int reg, TCGv_i64 v)
230 tcg_gen_mov_i64(fregs[reg], v);
/* Store the low 32 bits of @v into bits 0..31 of GPR @reg. */
233 static void store_reg32_i64(int reg, TCGv_i64 v)
235 /* 32 bit register writes keep the upper half */
236 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
/* Store the low 32 bits of @v into the HIGH half (bits 32..63) of GPR @reg;
   the low half is preserved. */
239 static void store_reg32h_i64(int reg, TCGv_i64 v)
241 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* Store a 32-bit float value into the high half of FPR @reg (the half
   load_freg32_i64 reads back); the low half is preserved. */
244 static void store_freg32_i64(int reg, TCGv_i64 v)
246 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
/* Fetch the low doubleword of a 128-bit helper result from env->retxl. */
249 static void return_low128(TCGv_i64 dest)
251 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
/* Flush the translation-time PC into the architectural psw.addr. */
254 static void update_psw_addr(DisasContext *s)
256 /* psw.addr */
257 tcg_gen_movi_i64(psw_addr, s->pc);
/* Record a (taken) branch for PER: update gbea with the branch address and,
   if PER is enabled in this TB, call the per_branch helper with the target
   (@to_next selects s->next_pc, else the value already in psw_addr).
   No-op in user mode. */
260 static void per_branch(DisasContext *s, bool to_next)
262 #ifndef CONFIG_USER_ONLY
263 tcg_gen_movi_i64(gbea, s->pc);
265 if (s->tb->flags & FLAG_MASK_PER) {
266 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
267 gen_helper_per_branch(cpu_env, gbea, next_pc);
268 if (to_next) {
269 tcg_temp_free_i64(next_pc);
272 #endif
/* Conditional variant of per_branch: when PER is active, branch around the
   helper call unless (@arg1 cond @arg2) holds; otherwise just conditionally
   update gbea via movcond.  No-op in user mode. */
275 static void per_branch_cond(DisasContext *s, TCGCond cond,
276 TCGv_i64 arg1, TCGv_i64 arg2)
278 #ifndef CONFIG_USER_ONLY
279 if (s->tb->flags & FLAG_MASK_PER) {
280 TCGLabel *lab = gen_new_label();
281 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
283 tcg_gen_movi_i64(gbea, s->pc);
284 gen_helper_per_branch(cpu_env, gbea, psw_addr);
286 gen_set_label(lab);
287 } else {
288 TCGv_i64 pc = tcg_const_i64(s->pc);
289 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
290 tcg_temp_free_i64(pc);
292 #endif
/* Record the current PC as the breaking-event address (gbea). */
295 static void per_breaking_event(DisasContext *s)
297 tcg_gen_movi_i64(gbea, s->pc);
/* Flush the translation-time cc_op into the global, unless it is already
   dynamic or static (in which case the global is already authoritative). */
300 static void update_cc_op(DisasContext *s)
302 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
303 tcg_gen_movi_i32(cc_op, s->cc_op);
/* Sync psw.addr and cc_op before an operation that may fault, so the
   exception path sees consistent architectural state. */
307 static void potential_page_fault(DisasContext *s)
309 update_psw_addr(s);
310 update_cc_op(s);
/* Fetch a 2-byte instruction halfword at @pc, zero-extended to 64 bits. */
313 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
315 return (uint64_t)cpu_lduw_code(env, pc);
/* Fetch a 4-byte instruction word at @pc, zero-extended to 64 bits
   (the uint32_t cast prevents sign-extension of cpu_ldl_code's result). */
318 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
320 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
/* Map the PSW address-space-control bits (from tb->flags) to a QEMU MMU
   index: 0 = primary, 1 = secondary, 2 = home. */
323 static int get_mem_index(DisasContext *s)
325 switch (s->tb->flags & FLAG_MASK_ASC) {
326 case PSW_ASC_PRIMARY >> 32:
327 return 0;
328 case PSW_ASC_SECONDARY >> 32:
329 return 1;
330 case PSW_ASC_HOME >> 32:
331 return 2;
332 default:
333 tcg_abort();
334 break;
338 static void gen_exception(int excp)
340 TCGv_i32 tmp = tcg_const_i32(excp);
341 gen_helper_exception(cpu_env, tmp);
342 tcg_temp_free_i32(tmp);
/* Raise a program exception with code @code: record the code and the
   current instruction length in env, advance psw.addr past the insn,
   sync the cc, then take EXCP_PGM. */
345 static void gen_program_exception(DisasContext *s, int code)
347 TCGv_i32 tmp;
349 /* Remember what pgm exception this was. */
350 tmp = tcg_const_i32(code);
351 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
352 tcg_temp_free_i32(tmp);
354 tmp = tcg_const_i32(s->ilen);
355 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
356 tcg_temp_free_i32(tmp);
358 /* Advance past instruction. */
359 s->pc = s->next_pc;
360 update_psw_addr(s);
362 /* Save off cc. */
363 update_cc_op(s);
365 /* Trigger exception. */
366 gen_exception(EXCP_PGM);
/* Raise an operation (illegal opcode) program exception. */
369 static inline void gen_illegal_opcode(DisasContext *s)
371 gen_program_exception(s, PGM_OPERATION);
/* Raise a data program exception with DXC 0xff: OR 0xff into the DXC byte
   of the FPC, then take PGM_DATA. */
374 static inline void gen_trap(DisasContext *s)
376 TCGv_i32 t;
378 /* Set DXC to 0xff. */
379 t = tcg_temp_new_i32();
380 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
381 tcg_gen_ori_i32(t, t, 0xff00);
382 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
383 tcg_temp_free_i32(t);
385 gen_program_exception(s, PGM_DATA);
388 #ifndef CONFIG_USER_ONLY
389 static void check_privileged(DisasContext *s)
391 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
392 gen_program_exception(s, PGM_PRIVILEGED);
395 #endif
/* Compute an effective address base(b2) + index(x2) + displacement(d2)
   into a fresh temporary.  In 24/31-bit mode the result is masked to
   31 bits; a constant-only address can be masked at translation time. */
397 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
399 TCGv_i64 tmp = tcg_temp_new_i64();
400 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
402 /* Note that d2 is limited to 20 bits, signed. If we crop negative
403    displacements early we create larger immedate addends. */
405 /* Note that addi optimizes the imm==0 case. */
406 if (b2 && x2) {
407 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
408 tcg_gen_addi_i64(tmp, tmp, d2);
409 } else if (b2) {
410 tcg_gen_addi_i64(tmp, regs[b2], d2);
411 } else if (x2) {
412 tcg_gen_addi_i64(tmp, regs[x2], d2);
413 } else {
414 if (need_31) {
415 d2 &= 0x7fffffff;
416 need_31 = false;
418 tcg_gen_movi_i64(tmp, d2);
420 if (need_31) {
421 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
424 return tmp;
427 static inline bool live_cc_data(DisasContext *s)
429 return (s->cc_op != CC_OP_DYNAMIC
430 && s->cc_op != CC_OP_STATIC
431 && s->cc_op > 3);
/* Set the cc to the constant @val (0..3); any pending cc operand data
   becomes dead and is discarded. */
434 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
436 if (live_cc_data(s)) {
437 tcg_gen_discard_i64(cc_src);
438 tcg_gen_discard_i64(cc_dst);
439 tcg_gen_discard_i64(cc_vr);
441 s->cc_op = CC_OP_CONST0 + val;
/* Record a one-operand cc computation: @dst goes to cc_dst, cc_op = @op.
   Unused operand slots are discarded if they held live data. */
444 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
446 if (live_cc_data(s)) {
447 tcg_gen_discard_i64(cc_src);
448 tcg_gen_discard_i64(cc_vr);
450 tcg_gen_mov_i64(cc_dst, dst);
451 s->cc_op = op;
/* Record a two-operand cc computation: @src/@dst to cc_src/cc_dst,
   cc_op = @op; cc_vr is discarded if live. */
454 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
455 TCGv_i64 dst)
457 if (live_cc_data(s)) {
458 tcg_gen_discard_i64(cc_vr);
460 tcg_gen_mov_i64(cc_src, src);
461 tcg_gen_mov_i64(cc_dst, dst);
462 s->cc_op = op;
/* Record a three-operand cc computation: @src/@dst/@vr to
   cc_src/cc_dst/cc_vr, cc_op = @op. */
465 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
466 TCGv_i64 dst, TCGv_i64 vr)
468 tcg_gen_mov_i64(cc_src, src);
469 tcg_gen_mov_i64(cc_dst, dst);
470 tcg_gen_mov_i64(cc_vr, vr);
471 s->cc_op = op;
/* cc from zero/non-zero test of a 64-bit value. */
474 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
476 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
/* cc from zero/non-zero test of a 32-bit float result. */
479 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
481 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
/* cc from zero/non-zero test of a 64-bit float result. */
484 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
486 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
/* cc from zero/non-zero test of a 128-bit float result (high/low halves). */
489 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
491 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
494 /* CC value is in env->cc_op */
/* Mark the cc as fully materialized in the cc_op global; pending operand
   data is dead and discarded. */
495 static void set_cc_static(DisasContext *s)
497 if (live_cc_data(s)) {
498 tcg_gen_discard_i64(cc_src);
499 tcg_gen_discard_i64(cc_dst);
500 tcg_gen_discard_i64(cc_vr);
502 s->cc_op = CC_OP_STATIC;
505 /* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code into the cc_op global by
   calling the calc_cc helper with 0/1/2/3 operands as the pending cc_op
   requires; constants and already-static values are handled inline.
   The first switch only prepares arguments; the second dispatches. */
506 static void gen_op_calc_cc(DisasContext *s)
508 TCGv_i32 local_cc_op;
509 TCGv_i64 dummy;
511 TCGV_UNUSED_I32(local_cc_op);
512 TCGV_UNUSED_I64(dummy);
513 switch (s->cc_op) {
514 default:
515 dummy = tcg_const_i64(0);
516 /* FALLTHRU */
517 case CC_OP_ADD_64:
518 case CC_OP_ADDU_64:
519 case CC_OP_ADDC_64:
520 case CC_OP_SUB_64:
521 case CC_OP_SUBU_64:
522 case CC_OP_SUBB_64:
523 case CC_OP_ADD_32:
524 case CC_OP_ADDU_32:
525 case CC_OP_ADDC_32:
526 case CC_OP_SUB_32:
527 case CC_OP_SUBU_32:
528 case CC_OP_SUBB_32:
529 local_cc_op = tcg_const_i32(s->cc_op);
530 break;
531 case CC_OP_CONST0:
532 case CC_OP_CONST1:
533 case CC_OP_CONST2:
534 case CC_OP_CONST3:
535 case CC_OP_STATIC:
536 case CC_OP_DYNAMIC:
537 break;
540 switch (s->cc_op) {
541 case CC_OP_CONST0:
542 case CC_OP_CONST1:
543 case CC_OP_CONST2:
544 case CC_OP_CONST3:
545 /* s->cc_op is the cc value */
546 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
547 break;
548 case CC_OP_STATIC:
549 /* env->cc_op already is the cc value */
550 break;
551 case CC_OP_NZ:
552 case CC_OP_ABS_64:
553 case CC_OP_NABS_64:
554 case CC_OP_ABS_32:
555 case CC_OP_NABS_32:
556 case CC_OP_LTGT0_32:
557 case CC_OP_LTGT0_64:
558 case CC_OP_COMP_32:
559 case CC_OP_COMP_64:
560 case CC_OP_NZ_F32:
561 case CC_OP_NZ_F64:
562 case CC_OP_FLOGR:
563 /* 1 argument */
564 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
565 break;
566 case CC_OP_ICM:
567 case CC_OP_LTGT_32:
568 case CC_OP_LTGT_64:
569 case CC_OP_LTUGTU_32:
570 case CC_OP_LTUGTU_64:
571 case CC_OP_TM_32:
572 case CC_OP_TM_64:
573 case CC_OP_SLA_32:
574 case CC_OP_SLA_64:
575 case CC_OP_NZ_F128:
576 /* 2 arguments */
577 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
578 break;
579 case CC_OP_ADD_64:
580 case CC_OP_ADDU_64:
581 case CC_OP_ADDC_64:
582 case CC_OP_SUB_64:
583 case CC_OP_SUBU_64:
584 case CC_OP_SUBB_64:
585 case CC_OP_ADD_32:
586 case CC_OP_ADDU_32:
587 case CC_OP_ADDC_32:
588 case CC_OP_SUB_32:
589 case CC_OP_SUBU_32:
590 case CC_OP_SUBB_32:
591 /* 3 arguments */
592 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
593 break;
594 case CC_OP_DYNAMIC:
595 /* unknown operation - assume 3 arguments and cc_op in env */
596 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
597 break;
598 default:
599 tcg_abort();
602 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
603 tcg_temp_free_i32(local_cc_op);
605 if (!TCGV_IS_UNUSED_I64(dummy)) {
606 tcg_temp_free_i64(dummy);
609 /* We now have cc in cc_op as constant */
610 set_cc_static(s);
613 static bool use_exit_tb(DisasContext *s)
615 return (s->singlestep_enabled ||
616 (s->tb->cflags & CF_LAST_IO) ||
617 (s->tb->flags & FLAG_MASK_PER));
/* True if a direct goto_tb chain to @dest is permissible: not forced to
   exit (see use_exit_tb) and, in system mode, @dest on the same page as
   the TB start or the current insn. */
620 static bool use_goto_tb(DisasContext *s, uint64_t dest)
622 if (unlikely(use_exit_tb(s))) {
623 return false;
625 #ifndef CONFIG_USER_ONLY
626 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
627 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
628 #else
629 return true;
630 #endif
/* Debug accounting: count a branch that could NOT be folded into an
   inline comparison for this cc_op. */
633 static void account_noninline_branch(DisasContext *s, int cc_op)
635 #ifdef DEBUG_INLINE_BRANCHES
636 inline_branch_miss[cc_op]++;
637 #endif
/* Debug accounting: count a branch folded into an inline comparison. */
640 static void account_inline_branch(DisasContext *s, int cc_op)
642 #ifdef DEBUG_INLINE_BRANCHES
643 inline_branch_hit[cc_op]++;
644 #endif
647 /* Table of mask values to comparison codes, given a comparison as input.
648    For such, CC=3 should not be possible. */
/* Indexed by the 4-bit branch mask (CC0..CC3 bits); entries come in pairs
   because the low mask bit (CC=3) is a don't-care here. */
649 static const TCGCond ltgt_cond[16] = {
650 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
651 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
652 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
653 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
654 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
655 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
656 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
657 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
660 /* Table of mask values to comparison codes, given a logic op as input.
661    For such, only CC=0 and CC=1 should be possible. */
662 static const TCGCond nz_cond[16] = {
663 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
664 TCG_COND_NEVER, TCG_COND_NEVER,
665 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
666 TCG_COND_NE, TCG_COND_NE,
667 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
668 TCG_COND_EQ, TCG_COND_EQ,
669 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
670 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
673 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
674    details required to generate a TCG comparison. */
/* Two phases: first map (cc_op, mask) to a TCG condition, falling back to
   do_dynamic (materialize cc via gen_op_calc_cc) when the mask has no
   inline equivalent; then load the comparison operands, narrowing to
   32 bits where the cc_op permits.  c->g1/c->g2 mark operands that are
   TCG globals and must not be freed by free_compare. */
675 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
677 TCGCond cond;
678 enum cc_op old_cc_op = s->cc_op;
680 if (mask == 15 || mask == 0) {
681 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
682 c->u.s32.a = cc_op;
683 c->u.s32.b = cc_op;
684 c->g1 = c->g2 = true;
685 c->is_64 = false;
686 return;
689 /* Find the TCG condition for the mask + cc op. */
690 switch (old_cc_op) {
691 case CC_OP_LTGT0_32:
692 case CC_OP_LTGT0_64:
693 case CC_OP_LTGT_32:
694 case CC_OP_LTGT_64:
695 cond = ltgt_cond[mask];
696 if (cond == TCG_COND_NEVER) {
697 goto do_dynamic;
699 account_inline_branch(s, old_cc_op);
700 break;
702 case CC_OP_LTUGTU_32:
703 case CC_OP_LTUGTU_64:
704 cond = tcg_unsigned_cond(ltgt_cond[mask]);
705 if (cond == TCG_COND_NEVER) {
706 goto do_dynamic;
708 account_inline_branch(s, old_cc_op);
709 break;
711 case CC_OP_NZ:
712 cond = nz_cond[mask];
713 if (cond == TCG_COND_NEVER) {
714 goto do_dynamic;
716 account_inline_branch(s, old_cc_op);
717 break;
719 case CC_OP_TM_32:
720 case CC_OP_TM_64:
721 switch (mask) {
722 case 8:
723 cond = TCG_COND_EQ;
724 break;
725 case 4 | 2 | 1:
726 cond = TCG_COND_NE;
727 break;
728 default:
729 goto do_dynamic;
731 account_inline_branch(s, old_cc_op);
732 break;
734 case CC_OP_ICM:
735 switch (mask) {
736 case 8:
737 cond = TCG_COND_EQ;
738 break;
739 case 4 | 2 | 1:
740 case 4 | 2:
741 cond = TCG_COND_NE;
742 break;
743 default:
744 goto do_dynamic;
746 account_inline_branch(s, old_cc_op);
747 break;
749 case CC_OP_FLOGR:
750 switch (mask & 0xa) {
751 case 8: /* src == 0 -> no one bit found */
752 cond = TCG_COND_EQ;
753 break;
754 case 2: /* src != 0 -> one bit found */
755 cond = TCG_COND_NE;
756 break;
757 default:
758 goto do_dynamic;
760 account_inline_branch(s, old_cc_op);
761 break;
763 case CC_OP_ADDU_32:
764 case CC_OP_ADDU_64:
765 switch (mask) {
766 case 8 | 2: /* vr == 0 */
767 cond = TCG_COND_EQ;
768 break;
769 case 4 | 1: /* vr != 0 */
770 cond = TCG_COND_NE;
771 break;
772 case 8 | 4: /* no carry -> vr >= src */
773 cond = TCG_COND_GEU;
774 break;
775 case 2 | 1: /* carry -> vr < src */
776 cond = TCG_COND_LTU;
777 break;
778 default:
779 goto do_dynamic;
781 account_inline_branch(s, old_cc_op);
782 break;
784 case CC_OP_SUBU_32:
785 case CC_OP_SUBU_64:
786 /* Note that CC=0 is impossible; treat it as dont-care. */
787 switch (mask & 7) {
788 case 2: /* zero -> op1 == op2 */
789 cond = TCG_COND_EQ;
790 break;
791 case 4 | 1: /* !zero -> op1 != op2 */
792 cond = TCG_COND_NE;
793 break;
794 case 4: /* borrow (!carry) -> op1 < op2 */
795 cond = TCG_COND_LTU;
796 break;
797 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
798 cond = TCG_COND_GEU;
799 break;
800 default:
801 goto do_dynamic;
803 account_inline_branch(s, old_cc_op);
804 break;
806 default:
807 do_dynamic:
808 /* Calculate cc value. */
809 gen_op_calc_cc(s);
810 /* FALLTHRU */
812 case CC_OP_STATIC:
813 /* Jump based on CC. We'll load up the real cond below;
814    the assignment here merely avoids a compiler warning. */
815 account_noninline_branch(s, old_cc_op);
816 old_cc_op = CC_OP_STATIC;
817 cond = TCG_COND_NEVER;
818 break;
821 /* Load up the arguments of the comparison. */
822 c->is_64 = true;
823 c->g1 = c->g2 = false;
824 switch (old_cc_op) {
825 case CC_OP_LTGT0_32:
826 c->is_64 = false;
827 c->u.s32.a = tcg_temp_new_i32();
828 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
829 c->u.s32.b = tcg_const_i32(0);
830 break;
831 case CC_OP_LTGT_32:
832 case CC_OP_LTUGTU_32:
833 case CC_OP_SUBU_32:
834 c->is_64 = false;
835 c->u.s32.a = tcg_temp_new_i32();
836 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
837 c->u.s32.b = tcg_temp_new_i32();
838 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
839 break;
841 case CC_OP_LTGT0_64:
842 case CC_OP_NZ:
843 case CC_OP_FLOGR:
844 c->u.s64.a = cc_dst;
845 c->u.s64.b = tcg_const_i64(0);
846 c->g1 = true;
847 break;
848 case CC_OP_LTGT_64:
849 case CC_OP_LTUGTU_64:
850 case CC_OP_SUBU_64:
851 c->u.s64.a = cc_src;
852 c->u.s64.b = cc_dst;
853 c->g1 = c->g2 = true;
854 break;
856 case CC_OP_TM_32:
857 case CC_OP_TM_64:
858 case CC_OP_ICM:
859 c->u.s64.a = tcg_temp_new_i64();
860 c->u.s64.b = tcg_const_i64(0);
861 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
862 break;
864 case CC_OP_ADDU_32:
865 c->is_64 = false;
866 c->u.s32.a = tcg_temp_new_i32();
867 c->u.s32.b = tcg_temp_new_i32();
868 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
869 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
870 tcg_gen_movi_i32(c->u.s32.b, 0);
871 } else {
872 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
874 break;
876 case CC_OP_ADDU_64:
877 c->u.s64.a = cc_vr;
878 c->g1 = true;
879 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
880 c->u.s64.b = tcg_const_i64(0);
881 } else {
882 c->u.s64.b = cc_src;
883 c->g2 = true;
885 break;
887 case CC_OP_STATIC:
888 c->is_64 = false;
889 c->u.s32.a = cc_op;
890 c->g1 = true;
891 switch (mask) {
892 case 0x8 | 0x4 | 0x2: /* cc != 3 */
893 cond = TCG_COND_NE;
894 c->u.s32.b = tcg_const_i32(3);
895 break;
896 case 0x8 | 0x4 | 0x1: /* cc != 2 */
897 cond = TCG_COND_NE;
898 c->u.s32.b = tcg_const_i32(2);
899 break;
900 case 0x8 | 0x2 | 0x1: /* cc != 1 */
901 cond = TCG_COND_NE;
902 c->u.s32.b = tcg_const_i32(1);
903 break;
904 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
905 cond = TCG_COND_EQ;
906 c->g1 = false;
907 c->u.s32.a = tcg_temp_new_i32();
908 c->u.s32.b = tcg_const_i32(0);
909 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
910 break;
911 case 0x8 | 0x4: /* cc < 2 */
912 cond = TCG_COND_LTU;
913 c->u.s32.b = tcg_const_i32(2);
914 break;
915 case 0x8: /* cc == 0 */
916 cond = TCG_COND_EQ;
917 c->u.s32.b = tcg_const_i32(0);
918 break;
919 case 0x4 | 0x2 | 0x1: /* cc != 0 */
920 cond = TCG_COND_NE;
921 c->u.s32.b = tcg_const_i32(0);
922 break;
923 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
924 cond = TCG_COND_NE;
925 c->g1 = false;
926 c->u.s32.a = tcg_temp_new_i32();
927 c->u.s32.b = tcg_const_i32(0);
928 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
929 break;
930 case 0x4: /* cc == 1 */
931 cond = TCG_COND_EQ;
932 c->u.s32.b = tcg_const_i32(1);
933 break;
934 case 0x2 | 0x1: /* cc > 1 */
935 cond = TCG_COND_GTU;
936 c->u.s32.b = tcg_const_i32(1);
937 break;
938 case 0x2: /* cc == 2 */
939 cond = TCG_COND_EQ;
940 c->u.s32.b = tcg_const_i32(2);
941 break;
942 case 0x1: /* cc == 3 */
943 cond = TCG_COND_EQ;
944 c->u.s32.b = tcg_const_i32(3);
945 break;
946 default:
947 /* CC is masked by something else: (8 >> cc) & mask. */
948 cond = TCG_COND_NE;
949 c->g1 = false;
950 c->u.s32.a = tcg_const_i32(8);
951 c->u.s32.b = tcg_const_i32(0);
952 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
953 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
954 break;
956 break;
958 default:
959 abort();
961 c->cond = cond;
/* Free the operand temporaries of a DisasCompare, skipping those flagged
   as globals (g1/g2). */
964 static void free_compare(DisasCompare *c)
966 if (!c->g1) {
967 if (c->is_64) {
968 tcg_temp_free_i64(c->u.s64.a);
969 } else {
970 tcg_temp_free_i32(c->u.s32.a);
973 if (!c->g2) {
974 if (c->is_64) {
975 tcg_temp_free_i64(c->u.s64.b);
976 } else {
977 tcg_temp_free_i32(c->u.s32.b);
982 /* ====================================================================== */
983 /* Define the insn format enumeration. */
/* The F0..F5 macros collapse each entry of insn-format.def into a single
   FMT_<name> enumerator; they are #undef'd immediately after use. */
984 #define F0(N) FMT_##N,
985 #define F1(N, X1) F0(N)
986 #define F2(N, X1, X2) F0(N)
987 #define F3(N, X1, X2, X3) F0(N)
988 #define F4(N, X1, X2, X3, X4) F0(N)
989 #define F5(N, X1, X2, X3, X4, X5) F0(N)
991 typedef enum {
992 #include "insn-format.def"
993 } DisasFormat;
995 #undef F0
996 #undef F1
997 #undef F2
998 #undef F3
999 #undef F4
1000 #undef F5
1002 /* Define a structure to hold the decoded fields. We'll store each inside
1003    an array indexed by an enum. In order to conserve memory, we'll arrange
1004    for fields that do not exist at the same time to overlap, thus the "C"
1005    for compact. For checking purposes there is an "O" for original index
1006    as well that will be applied to availability bitmaps. */
/* "Original" field indices: one bit per field kind in presentO. */
1008 enum DisasFieldIndexO {
1009 FLD_O_r1,
1010 FLD_O_r2,
1011 FLD_O_r3,
1012 FLD_O_m1,
1013 FLD_O_m3,
1014 FLD_O_m4,
1015 FLD_O_b1,
1016 FLD_O_b2,
1017 FLD_O_b4,
1018 FLD_O_d1,
1019 FLD_O_d2,
1020 FLD_O_d4,
1021 FLD_O_x2,
1022 FLD_O_l1,
1023 FLD_O_l2,
1024 FLD_O_i1,
1025 FLD_O_i2,
1026 FLD_O_i3,
1027 FLD_O_i4,
1028 FLD_O_i5
/* "Compact" indices: mutually-exclusive fields share a slot so only
   NUM_C_FIELD ints are stored per decoded insn. */
1031 enum DisasFieldIndexC {
1032 FLD_C_r1 = 0,
1033 FLD_C_m1 = 0,
1034 FLD_C_b1 = 0,
1035 FLD_C_i1 = 0,
1037 FLD_C_r2 = 1,
1038 FLD_C_b2 = 1,
1039 FLD_C_i2 = 1,
1041 FLD_C_r3 = 2,
1042 FLD_C_m3 = 2,
1043 FLD_C_i3 = 2,
1045 FLD_C_m4 = 3,
1046 FLD_C_b4 = 3,
1047 FLD_C_i4 = 3,
1048 FLD_C_l1 = 3,
1050 FLD_C_i5 = 4,
1051 FLD_C_d1 = 4,
1053 FLD_C_d2 = 5,
1055 FLD_C_d4 = 6,
1056 FLD_C_x2 = 6,
1057 FLD_C_l2 = 6,
1059 NUM_C_FIELD = 7
/* Decoded instruction: raw bytes, opcode bytes, availability bitmaps
   (presentC/presentO) and the compact field-value array. */
1062 struct DisasFields {
1063 uint64_t raw_insn;
1064 unsigned op:8;
1065 unsigned op2:8;
1066 unsigned presentC:16;
1067 unsigned int presentO;
1068 int c[NUM_C_FIELD];
1071 /* This is the way fields are to be accessed out of DisasFields. */
1072 #define have_field(S, F) have_field1((S), FLD_O_##F)
1073 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* Test the presentO bit for original-index @c. */
1075 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1077 return (f->presentO >> c) & 1;
/* Read the compact slot @c; asserts the field was actually decoded. */
1080 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1081 enum DisasFieldIndexC c)
1083 assert(have_field1(f, o));
1084 return f->c[c];
1087 /* Describe the layout of each field in each format. */
1088 typedef struct DisasField {
1089 unsigned int beg:8;        /* bit offset of the field within the insn */
1090 unsigned int size:8;       /* field width in bits */
1091 unsigned int type:2;       /* extraction type (see R/I/BD macro defs below) */
1092 unsigned int indexC:6;
1093 enum DisasFieldIndexO indexO:8;
1094 } DisasField;
/* Per-format array of field descriptors, one slot per compact index. */
1096 typedef struct DisasFormatInfo {
1097 DisasField op[NUM_C_FIELD];
1098 } DisasFormatInfo;
/* Field-layout helper macros: type 0 = unsigned, 1 = signed immediate,
   2 = long (20-bit) displacement — consumed via insn-format.def to build
   format_info[], then #undef'd. */
1100 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1101 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1102 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1104 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1105 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1106 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1107 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1108 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1109 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1110 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1111 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1112 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1113 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1115 #define F0(N) { { } },
1116 #define F1(N, X1) { { X1 } },
1117 #define F2(N, X1, X2) { { X1, X2 } },
1118 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1119 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1120 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1122 static const DisasFormatInfo format_info[] = {
1123 #include "insn-format.def"
1126 #undef F0
1127 #undef F1
1128 #undef F2
1129 #undef F3
1130 #undef F4
1131 #undef F5
1132 #undef R
1133 #undef M
1134 #undef BD
1135 #undef BXD
1136 #undef BDL
1137 #undef BXDL
1138 #undef I
1139 #undef L
1141 /* Generally, we'll extract operands into this structures, operate upon
1142    them, and store them back. See the "in1", "in2", "prep", "wout" sets
1143    of routines below for more details. */
1144 typedef struct {
1145 bool g_out, g_out2, g_in1, g_in2;  /* corresponding TCGv is a global: don't free */
1146 TCGv_i64 out, out2, in1, in2;
1147 TCGv_i64 addr1;
1148 } DisasOps;
1150 /* Instructions can place constraints on their operands, raising specification
1151    exceptions if they are violated. To make this easy to automate, each "in1",
1152    "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1153    of the following, or 0. To make this easy to document, we'll put the
1154    SPEC_<name> defines next to <name>. */
/* Bit flags, OR-able into DisasInsn.spec. */
1156 #define SPEC_r1_even 1
1157 #define SPEC_r2_even 2
1158 #define SPEC_r3_even 4
1159 #define SPEC_r1_f128 8
1160 #define SPEC_r2_f128 16
1162 /* Return values from translate_one, indicating the state of the TB. */
1163 typedef enum {
1164 /* Continue the TB. */
1165 NO_EXIT,
1166 /* We have emitted one or more goto_tb. No fixup required. */
1167 EXIT_GOTO_TB,
1168 /* We are not using a goto_tb (for whatever reason), but have updated
1169    the PC (for whatever reason), so there's no need to do it again on
1170    exiting the TB. */
1171 EXIT_PC_UPDATED,
1172 /* We have updated the PC and CC values. */
1173 EXIT_PC_CC_UPDATED,
1174 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1175    updated the PC for the next instruction to be executed. */
1176 EXIT_PC_STALE,
1177 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1178    No following code will be executed. */
1179 EXIT_NORETURN,
1180 } ExitStatus;
/* Architectural facility an insn belongs to; used to gate decoding. */
1182 typedef enum DisasFacility {
1183 FAC_Z, /* zarch (default) */
1184 FAC_CASS, /* compare and swap and store */
1185 FAC_CASS2, /* compare and swap and store 2*/
1186 FAC_DFP, /* decimal floating point */
1187 FAC_DFPR, /* decimal floating point rounding */
1188 FAC_DO, /* distinct operands */
1189 FAC_EE, /* execute extensions */
1190 FAC_EI, /* extended immediate */
1191 FAC_FPE, /* floating point extension */
1192 FAC_FPSSH, /* floating point support sign handling */
1193 FAC_FPRGR, /* FPR-GR transfer */
1194 FAC_GIE, /* general instructions extension */
1195 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1196 FAC_HW, /* high-word */
1197 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1198 FAC_MIE, /* miscellaneous-instruction-extensions */
1199 FAC_LAT, /* load-and-trap */
1200 FAC_LOC, /* load/store on condition */
1201 FAC_LD, /* long displacement */
1202 FAC_PC, /* population count */
1203 FAC_SCF, /* store clock fast */
1204 FAC_SFLE, /* store facility list extended */
1205 FAC_ILA, /* interlocked access facility 1 */
1206 FAC_LPP, /* load-program-parameter */
1207 FAC_DAT_ENH, /* DAT-enhancement */
1208 } DisasFacility;
/* Static description of one instruction: opcode, format, facility,
   specification constraints, and the pipeline of helper callbacks
   (in1/in2/prep -> op -> wout/cout) used by translate_one. */
1210 struct DisasInsn {
1211 unsigned opc:16;
1212 DisasFormat fmt:8;
1213 DisasFacility fac:8;
1214 unsigned spec:8;          /* OR of SPEC_* constraint flags */
1216 const char *name;
1218 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1219 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1220 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1221 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1222 void (*help_cout)(DisasContext *, DisasOps *);
1223 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1225 uint64_t data;            /* per-insn constant passed through to helpers */
1228 /* ====================================================================== */
1229 /* Miscellaneous helpers, used by several operations. */
1231 static void help_l2_shift(DisasContext *s, DisasFields *f,
1232 DisasOps *o, int mask)
1234 int b2 = get_field(f, b2);
1235 int d2 = get_field(f, d2);
1237 if (b2 == 0) {
1238 o->in2 = tcg_const_i64(d2 & mask);
1239 } else {
1240 o->in2 = get_address(s, 0, b2, d2);
1241 tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Emit an unconditional direct branch to @dest.  Branch-to-next is a
   no-op (PER recorded only); otherwise use goto_tb chaining when
   allowed, else set psw_addr and exit to the main loop. */
1245 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1247 if (dest == s->next_pc) {
1248 per_branch(s, true);
1249 return NO_EXIT;
1251 if (use_goto_tb(s, dest)) {
1252 update_cc_op(s);
1253 per_breaking_event(s);
1254 tcg_gen_goto_tb(0);
1255 tcg_gen_movi_i64(psw_addr, dest);
1256 tcg_gen_exit_tb((uintptr_t)s->tb);
1257 return EXIT_GOTO_TB;
1258 } else {
1259 tcg_gen_movi_i64(psw_addr, dest);
1260 per_branch(s, false);
1261 return EXIT_PC_UPDATED;
/* Emit a conditional branch.  The destination is either relative
   (is_imm: s->pc + 2 * imm) or indirect via cdest.  Handles the
   degenerate conditions (never/always, branch-to-next, bcr %r0)
   specially, then picks the best exit strategy: both edges via
   goto_tb, fallthru-only via goto_tb, or a movcond'd PSW update. */
1265 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1266 bool is_imm, int imm, TCGv_i64 cdest)
1268 ExitStatus ret;
1269 uint64_t dest = s->pc + 2 * imm;
1270 TCGLabel *lab;
1272 /* Take care of the special cases first. */
1273 if (c->cond == TCG_COND_NEVER) {
1274 ret = NO_EXIT;
1275 goto egress;
1277 if (is_imm) {
1278 if (dest == s->next_pc) {
1279 /* Branch to next. */
1280 per_branch(s, true);
1281 ret = NO_EXIT;
1282 goto egress;
1284 if (c->cond == TCG_COND_ALWAYS) {
1285 ret = help_goto_direct(s, dest);
1286 goto egress;
1288 } else {
1289 if (TCGV_IS_UNUSED_I64(cdest)) {
1290 /* E.g. bcr %r0 -> no branch. */
1291 ret = NO_EXIT;
1292 goto egress;
1294 if (c->cond == TCG_COND_ALWAYS) {
1295 tcg_gen_mov_i64(psw_addr, cdest);
1296 per_branch(s, false);
1297 ret = EXIT_PC_UPDATED;
1298 goto egress;
1302 if (use_goto_tb(s, s->next_pc)) {
1303 if (is_imm && use_goto_tb(s, dest)) {
1304 /* Both exits can use goto_tb. */
1305 update_cc_op(s);
1307 lab = gen_new_label();
1308 if (c->is_64) {
1309 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1310 } else {
1311 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1314 /* Branch not taken. */
1315 tcg_gen_goto_tb(0);
1316 tcg_gen_movi_i64(psw_addr, s->next_pc);
1317 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1319 /* Branch taken. */
1320 gen_set_label(lab);
1321 per_breaking_event(s);
1322 tcg_gen_goto_tb(1);
1323 tcg_gen_movi_i64(psw_addr, dest);
1324 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1326 ret = EXIT_GOTO_TB;
1327 } else {
1328 /* Fallthru can use goto_tb, but taken branch cannot. */
1329 /* Store taken branch destination before the brcond. This
1330 avoids having to allocate a new local temp to hold it.
1331 We'll overwrite this in the not taken case anyway. */
1332 if (!is_imm) {
1333 tcg_gen_mov_i64(psw_addr, cdest);
1336 lab = gen_new_label();
1337 if (c->is_64) {
1338 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1339 } else {
1340 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1343 /* Branch not taken. */
1344 update_cc_op(s);
1345 tcg_gen_goto_tb(0);
1346 tcg_gen_movi_i64(psw_addr, s->next_pc);
1347 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1349 gen_set_label(lab);
1350 if (is_imm) {
1351 tcg_gen_movi_i64(psw_addr, dest);
1353 per_breaking_event(s);
1354 ret = EXIT_PC_UPDATED;
1356 } else {
1357 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1358 Most commonly we're single-stepping or some other condition that
1359 disables all use of goto_tb. Just update the PC and exit. */
1361 TCGv_i64 next = tcg_const_i64(s->next_pc);
1362 if (is_imm) {
1363 cdest = tcg_const_i64(dest);
1366 if (c->is_64) {
1367 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1368 cdest, next);
1369 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1370 } else {
1371 TCGv_i32 t0 = tcg_temp_new_i32();
1372 TCGv_i64 t1 = tcg_temp_new_i64();
1373 TCGv_i64 z = tcg_const_i64(0);
1374 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1375 tcg_gen_extu_i32_i64(t1, t0);
1376 tcg_temp_free_i32(t0);
1377 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1378 per_branch_cond(s, TCG_COND_NE, t1, z);
1379 tcg_temp_free_i64(t1);
1380 tcg_temp_free_i64(z);
1383 if (is_imm) {
1384 tcg_temp_free_i64(cdest);
1386 tcg_temp_free_i64(next);
1388 ret = EXIT_PC_UPDATED;
1391 egress:
1392 free_compare(c);
1393 return ret;
1396 /* ====================================================================== */
1397 /* The operations. These perform the bulk of the work for any insn,
1398 usually after the operands have been loaded and output initialized. */
/* Absolute value: out = (in2 < 0) ? -in2 : in2, via movcond. */
1400 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1402 TCGv_i64 z, n;
1403 z = tcg_const_i64(0);
1404 n = tcg_temp_new_i64();
1405 tcg_gen_neg_i64(n, o->in2);
1406 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1407 tcg_temp_free_i64(n);
1408 tcg_temp_free_i64(z);
1409 return NO_EXIT;
/* FP absolute value: clear the sign bit of a 32-bit float. */
1412 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1414 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1415 return NO_EXIT;
/* FP absolute value: clear the sign bit of a 64-bit float. */
1418 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1420 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1421 return NO_EXIT;
/* FP absolute value of a 128-bit float: clear the sign bit in the
   high half (in1), pass the low half (in2) through unchanged. */
1424 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1426 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1427 tcg_gen_mov_i64(o->out2, o->in2);
1428 return NO_EXIT;
/* ADD: plain 64-bit addition of the two inputs. */
1431 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1433 tcg_gen_add_i64(o->out, o->in1, o->in2);
1434 return NO_EXIT;
/* ADD WITH CARRY: out = in1 + in2 + carry, where the carry is
   recovered from the current condition code via disas_jcc. */
1437 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1439 DisasCompare cmp;
1440 TCGv_i64 carry;
1442 tcg_gen_add_i64(o->out, o->in1, o->in2);
1444 /* The carry flag is the msb of CC, therefore the branch mask that would
1445 create that comparison is 3. Feeding the generated comparison to
1446 setcond produces the carry flag that we desire. */
1447 disas_jcc(s, &cmp, 3);
1448 carry = tcg_temp_new_i64();
1449 if (cmp.is_64) {
1450 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1451 } else {
1452 TCGv_i32 t = tcg_temp_new_i32();
1453 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1454 tcg_gen_extu_i32_i64(carry, t);
1455 tcg_temp_free_i32(t);
1457 free_compare(&cmp);
1459 tcg_gen_add_i64(o->out, o->out, carry);
1460 tcg_temp_free_i64(carry);
1461 return NO_EXIT;
/* Short-BFP add, delegated to the aeb helper. */
1464 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1466 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1467 return NO_EXIT;
/* Long-BFP add, delegated to the adb helper. */
1470 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1472 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1473 return NO_EXIT;
/* Extended-BFP add; the 128-bit result comes back in out/out2. */
1476 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1478 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1479 return_low128(o->out2);
1480 return NO_EXIT;
/* AND: bitwise and of the two inputs. */
1483 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1485 tcg_gen_and_i64(o->out, o->in1, o->in2);
1486 return NO_EXIT;
/* AND IMMEDIATE into one field of the register: insn->data encodes
   the field's bit position (low byte) and width (high byte).  Bits
   outside the field are forced to 1 in in2 so they pass through. */
1489 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1491 int shift = s->insn->data & 0xff;
1492 int size = s->insn->data >> 8;
1493 uint64_t mask = ((1ull << size) - 1) << shift;
1495 assert(!o->g_in2);
1496 tcg_gen_shli_i64(o->in2, o->in2, shift);
1497 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1498 tcg_gen_and_i64(o->out, o->in1, o->in2);
1500 /* Produce the CC from only the bits manipulated. */
1501 tcg_gen_andi_i64(cc_dst, o->out, mask);
1502 set_cc_nz_u64(s, cc_dst);
1503 return NO_EXIT;
/* BRANCH AND SAVE: store the link information in out, then branch
   indirect through in2 (no branch if in2 is unused, i.e. R2 == 0). */
1506 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1508 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1509 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1510 tcg_gen_mov_i64(psw_addr, o->in2);
1511 per_branch(s, false);
1512 return EXIT_PC_UPDATED;
1513 } else {
1514 return NO_EXIT;
/* Relative BRANCH AND SAVE: save the link, then branch to pc + 2*i2. */
1518 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1520 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1521 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* BRANCH ON CONDITION (BC/BCR/BRC).  BCR with R2 == 0 never branches,
   but masks 14/15 still act as serialization points. */
1524 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1526 int m1 = get_field(s->fields, m1);
1527 bool is_imm = have_field(s->fields, i2);
1528 int imm = is_imm ? get_field(s->fields, i2) : 0;
1529 DisasCompare c;
1531 /* BCR with R2 = 0 causes no branching */
1532 if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1533 if (m1 == 14) {
1534 /* Perform serialization */
1535 /* FIXME: check for fast-BCR-serialization facility */
1536 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1538 if (m1 == 15) {
1539 /* Perform serialization */
1540 /* FIXME: perform checkpoint-synchronisation */
1541 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1543 return NO_EXIT;
1546 disas_jcc(s, &c, m1);
1547 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and
   branch if the result is non-zero. */
1550 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1552 int r1 = get_field(s->fields, r1);
1553 bool is_imm = have_field(s->fields, i2);
1554 int imm = is_imm ? get_field(s->fields, i2) : 0;
1555 DisasCompare c;
1556 TCGv_i64 t;
1558 c.cond = TCG_COND_NE;
1559 c.is_64 = false;
1560 c.g1 = false;
1561 c.g2 = false;
1563 t = tcg_temp_new_i64();
1564 tcg_gen_subi_i64(t, regs[r1], 1);
1565 store_reg32_i64(r1, t);
1566 c.u.s32.a = tcg_temp_new_i32();
1567 c.u.s32.b = tcg_const_i32(0);
1568 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1569 tcg_temp_free_i64(t);
1571 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON COUNT, high half: same as above but decrementing the
   high 32 bits of r1; destination is always relative (imm). */
1574 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1576 int r1 = get_field(s->fields, r1);
1577 int imm = get_field(s->fields, i2);
1578 DisasCompare c;
1579 TCGv_i64 t;
1581 c.cond = TCG_COND_NE;
1582 c.is_64 = false;
1583 c.g1 = false;
1584 c.g2 = false;
1586 t = tcg_temp_new_i64();
1587 tcg_gen_shri_i64(t, regs[r1], 32);
1588 tcg_gen_subi_i64(t, t, 1);
1589 store_reg32h_i64(r1, t);
1590 c.u.s32.a = tcg_temp_new_i32();
1591 c.u.s32.b = tcg_const_i32(0);
1592 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1593 tcg_temp_free_i64(t);
1595 return help_branch(s, &c, 1, imm, o->in2);
/* BRANCH ON COUNT (64-bit): decrement r1 in place, branch if != 0.
   Note c.g1 = true: the compare operand is the global register. */
1598 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1600 int r1 = get_field(s->fields, r1);
1601 bool is_imm = have_field(s->fields, i2);
1602 int imm = is_imm ? get_field(s->fields, i2) : 0;
1603 DisasCompare c;
1605 c.cond = TCG_COND_NE;
1606 c.is_64 = true;
1607 c.g1 = true;
1608 c.g2 = false;
1610 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1611 c.u.s64.a = regs[r1];
1612 c.u.s64.b = tcg_const_i64(0);
1614 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON INDEX (32-bit): r1 += r3, then compare against the limit
   in r3|1.  insn->data selects LE (branch-on-low-or-equal) vs GT. */
1617 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1619 int r1 = get_field(s->fields, r1);
1620 int r3 = get_field(s->fields, r3);
1621 bool is_imm = have_field(s->fields, i2);
1622 int imm = is_imm ? get_field(s->fields, i2) : 0;
1623 DisasCompare c;
1624 TCGv_i64 t;
1626 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1627 c.is_64 = false;
1628 c.g1 = false;
1629 c.g2 = false;
1631 t = tcg_temp_new_i64();
1632 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1633 c.u.s32.a = tcg_temp_new_i32();
1634 c.u.s32.b = tcg_temp_new_i32();
1635 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1636 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1637 store_reg32_i64(r1, t);
1638 tcg_temp_free_i64(t);
1640 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON INDEX (64-bit).  When r1 == r3|1 the limit must be read
   before r1 is updated, so it is copied into a local temp first. */
1643 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1645 int r1 = get_field(s->fields, r1);
1646 int r3 = get_field(s->fields, r3);
1647 bool is_imm = have_field(s->fields, i2);
1648 int imm = is_imm ? get_field(s->fields, i2) : 0;
1649 DisasCompare c;
1651 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1652 c.is_64 = true;
1654 if (r1 == (r3 | 1)) {
1655 c.u.s64.b = load_reg(r3 | 1);
1656 c.g2 = false;
1657 } else {
1658 c.u.s64.b = regs[r3 | 1];
1659 c.g2 = true;
1662 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1663 c.u.s64.a = regs[r1];
1664 c.g1 = true;
1666 return help_branch(s, &c, is_imm, imm, o->in2);
/* COMPARE AND BRANCH: compare in1 with in2 (signedness chosen by
   insn->data), branching relative (i4) or to base+displacement (b4/d4). */
1669 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1671 int imm, m3 = get_field(s->fields, m3);
1672 bool is_imm;
1673 DisasCompare c;
1675 c.cond = ltgt_cond[m3];
1676 if (s->insn->data) {
1677 c.cond = tcg_unsigned_cond(c.cond);
1679 c.is_64 = c.g1 = c.g2 = true;
1680 c.u.s64.a = o->in1;
1681 c.u.s64.b = o->in2;
1683 is_imm = have_field(s->fields, i4);
1684 if (is_imm) {
1685 imm = get_field(s->fields, i4);
1686 } else {
1687 imm = 0;
/* Indirect form: the branch target address goes in o->out. */
1688 o->out = get_address(s, 0, get_field(s->fields, b4),
1689 get_field(s->fields, d4));
1692 return help_branch(s, &c, is_imm, imm, o->out);
/* BFP compares (short/long/extended): the helper computes the CC. */
1695 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1697 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1698 set_cc_static(s);
1699 return NO_EXIT;
1702 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1704 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1705 set_cc_static(s);
1706 return NO_EXIT;
1709 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1711 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1712 set_cc_static(s);
1713 return NO_EXIT;
/* Convert BFP to 32-bit fixed; m3 carries the rounding mode field. */
1716 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1718 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1719 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1720 tcg_temp_free_i32(m3);
1721 gen_set_cc_nz_f32(s, o->in2);
1722 return NO_EXIT;
1725 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1727 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1728 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1729 tcg_temp_free_i32(m3);
1730 gen_set_cc_nz_f64(s, o->in2);
1731 return NO_EXIT;
1734 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1736 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1737 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1738 tcg_temp_free_i32(m3);
1739 gen_set_cc_nz_f128(s, o->in1, o->in2);
1740 return NO_EXIT;
/* Convert BFP to 64-bit fixed. */
1743 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1745 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1746 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1747 tcg_temp_free_i32(m3);
1748 gen_set_cc_nz_f32(s, o->in2);
1749 return NO_EXIT;
1752 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1754 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1755 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1756 tcg_temp_free_i32(m3);
1757 gen_set_cc_nz_f64(s, o->in2);
1758 return NO_EXIT;
1761 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1763 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1764 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1765 tcg_temp_free_i32(m3);
1766 gen_set_cc_nz_f128(s, o->in1, o->in2);
1767 return NO_EXIT;
/* Convert BFP to 32-bit unsigned fixed (logical); m3 = rounding mode. */
1770 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1772 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1773 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1774 tcg_temp_free_i32(m3);
1775 gen_set_cc_nz_f32(s, o->in2);
1776 return NO_EXIT;
1779 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1781 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1782 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1783 tcg_temp_free_i32(m3);
1784 gen_set_cc_nz_f64(s, o->in2);
1785 return NO_EXIT;
1788 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1790 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1791 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1792 tcg_temp_free_i32(m3);
1793 gen_set_cc_nz_f128(s, o->in1, o->in2);
1794 return NO_EXIT;
/* Convert BFP to 64-bit unsigned fixed. */
1797 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1799 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1800 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1801 tcg_temp_free_i32(m3);
1802 gen_set_cc_nz_f32(s, o->in2);
1803 return NO_EXIT;
1806 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1808 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1809 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1810 tcg_temp_free_i32(m3);
1811 gen_set_cc_nz_f64(s, o->in2);
1812 return NO_EXIT;
1815 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1817 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1818 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1819 tcg_temp_free_i32(m3);
1820 gen_set_cc_nz_f128(s, o->in1, o->in2);
1821 return NO_EXIT;
/* Convert 64-bit signed fixed to BFP (short/long/extended). */
1824 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1826 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1827 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1828 tcg_temp_free_i32(m3);
1829 return NO_EXIT;
1832 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1834 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1835 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1836 tcg_temp_free_i32(m3);
1837 return NO_EXIT;
1840 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1842 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1843 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1844 tcg_temp_free_i32(m3);
1845 return_low128(o->out2);
1846 return NO_EXIT;
/* Convert 64-bit unsigned fixed to BFP (short/long/extended). */
1849 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1851 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1852 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1853 tcg_temp_free_i32(m3);
1854 return NO_EXIT;
1857 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1859 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1860 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1861 tcg_temp_free_i32(m3);
1862 return NO_EXIT;
1865 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1867 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1868 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1869 tcg_temp_free_i32(m3);
1870 return_low128(o->out2);
1871 return NO_EXIT;
/* CHECKSUM: the helper does the work and returns the number of bytes
   consumed; advance the address (r2) and shrink the length (r2+1). */
1876 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1876 int r2 = get_field(s->fields, r2);
1877 TCGv_i64 len = tcg_temp_new_i64();
1879 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1880 set_cc_static(s);
1881 return_low128(o->out);
1883 tcg_gen_add_i64(regs[r2], regs[r2], len);
1884 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1885 tcg_temp_free_i64(len);
1887 return NO_EXIT;
/* COMPARE LOGICAL (storage-to-storage).  Power-of-two lengths up to 8
   are inlined as a pair of loads and a logical compare; any other
   length falls back to the clc helper. */
1892 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1892 int l = get_field(s->fields, l1);
1893 TCGv_i32 vl;
1895 switch (l + 1) {
1896 case 1:
1897 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1898 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1899 break;
1900 case 2:
1901 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1902 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1903 break;
1904 case 4:
1905 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1906 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1907 break;
1908 case 8:
1909 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1910 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1911 break;
1912 default:
1913 vl = tcg_const_i32(l);
1914 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1915 tcg_temp_free_i32(vl);
1916 set_cc_static(s);
1917 return NO_EXIT;
1919 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1920 return NO_EXIT;
/* COMPARE LOGICAL LONG EXTENDED, via helper; CC set by the helper. */
1925 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1925 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1926 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1927 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1928 tcg_temp_free_i32(r1);
1929 tcg_temp_free_i32(r3);
1930 set_cc_static(s);
1931 return NO_EXIT;
/* COMPARE LOGICAL CHARACTERS UNDER MASK, via helper. */
1934 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1936 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1937 TCGv_i32 t1 = tcg_temp_new_i32();
1938 tcg_gen_extrl_i64_i32(t1, o->in1);
1939 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1940 set_cc_static(s);
1941 tcg_temp_free_i32(t1);
1942 tcg_temp_free_i32(m3);
1943 return NO_EXIT;
/* COMPARE LOGICAL STRING: helper compares until the byte in regs[0];
   the updated addresses come back in in1 and (via low128) in2. */
1946 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1948 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1949 set_cc_static(s);
1950 return_low128(o->in2);
1951 return NO_EXIT;
/* COPY SIGN: out = magnitude of in2 combined with the sign bit of in1. */
1956 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1956 TCGv_i64 t = tcg_temp_new_i64();
1957 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1958 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1959 tcg_gen_or_i64(o->out, o->out, t);
1960 tcg_temp_free_i64(t);
1961 return NO_EXIT;
/* COMPARE AND SWAP: atomic cmpxchg at base+displacement; CC is 0 when
   the expected value matched memory, 1 otherwise. */
1964 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1966 int d2 = get_field(s->fields, d2);
1967 int b2 = get_field(s->fields, b2);
1968 TCGv_i64 addr, cc;
1970 /* Note that in1 = R3 (new value) and
1971 in2 = (zero-extended) R1 (expected value). */
1973 addr = get_address(s, 0, b2, d2);
1974 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
1975 get_mem_index(s), s->insn->data | MO_ALIGN);
1976 tcg_temp_free_i64(addr);
1978 /* Are the memory and expected values (un)equal? Note that this setcond
1979 produces the output CC value, thus the NE sense of the test. */
1980 cc = tcg_temp_new_i64();
1981 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1982 tcg_gen_extrl_i64_i32(cc_op, cc);
1983 tcg_temp_free_i64(cc);
1984 set_cc_static(s);
1986 return NO_EXIT;
/* COMPARE DOUBLE AND SWAP (128-bit): performed entirely in the helper,
   which takes the register pair numbers; CC comes back via cc_op. */
1991 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1991 int r1 = get_field(s->fields, r1);
1992 int r3 = get_field(s->fields, r3);
1993 int d2 = get_field(s->fields, d2);
1994 int b2 = get_field(s->fields, b2);
1995 TCGv_i64 addr;
1996 TCGv_i32 t_r1, t_r3;
1998 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1999 addr = get_address(s, 0, b2, d2);
2000 t_r1 = tcg_const_i32(r1);
2001 t_r3 = tcg_const_i32(r3);
2002 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2003 tcg_temp_free_i64(addr);
2004 tcg_temp_free_i32(t_r1);
2005 tcg_temp_free_i32(t_r3);
2007 set_cc_static(s);
2008 return NO_EXIT;
2011 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): cmpxchg at the aligned
   address from in2; on a successful swap with the LSB of R2 set,
   purge the TLB on all CPUs. */
2012 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2014 TCGMemOp mop = s->insn->data;
2015 TCGv_i64 addr, old, cc;
2016 TCGLabel *lab = gen_new_label();
2018 /* Note that in1 = R1 (zero-extended expected value),
2019 out = R1 (original reg), out2 = R1+1 (new value). */
2021 check_privileged(s);
2022 addr = tcg_temp_new_i64();
2023 old = tcg_temp_new_i64();
2024 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2025 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2026 get_mem_index(s), mop | MO_ALIGN);
2027 tcg_temp_free_i64(addr);
2029 /* Are the memory and expected values (un)equal? */
2030 cc = tcg_temp_new_i64();
2031 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2032 tcg_gen_extrl_i64_i32(cc_op, cc);
2034 /* Write back the output now, so that it happens before the
2035 following branch, so that we don't need local temps. */
2036 if ((mop & MO_SIZE) == MO_32) {
2037 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2038 } else {
2039 tcg_gen_mov_i64(o->out, old);
2041 tcg_temp_free_i64(old);
2043 /* If the comparison was equal, and the LSB of R2 was set,
2044 then we need to flush the TLB (for all cpus). */
2045 tcg_gen_xori_i64(cc, cc, 1);
2046 tcg_gen_and_i64(cc, cc, o->in2);
2047 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2048 tcg_temp_free_i64(cc);
2050 gen_helper_purge(cpu_env);
2051 gen_set_label(lab);
2053 return NO_EXIT;
2055 #endif
/* CONVERT TO DECIMAL: convert the low 32 bits of in1 via the cvd
   helper, then store the 64-bit packed result at address in2. */
2059 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2059 TCGv_i64 t1 = tcg_temp_new_i64();
2060 TCGv_i32 t2 = tcg_temp_new_i32();
2061 tcg_gen_extrl_i64_i32(t2, o->in1);
2062 gen_helper_cvd(t1, t2);
2063 tcg_temp_free_i32(t2);
2064 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2065 tcg_temp_free_i64(t1);
2066 return NO_EXIT;
/* COMPARE AND TRAP: branch around the trap when the inverted m3
   condition holds, i.e. trap when the selected comparison is true.
   insn->data selects the unsigned (logical) variant. */
2071 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2071 int m3 = get_field(s->fields, m3);
2072 TCGLabel *lab = gen_new_label();
2073 TCGCond c;
2075 c = tcg_invert_cond(ltgt_cond[m3]);
2076 if (s->insn->data) {
2077 c = tcg_unsigned_cond(c);
2079 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2081 /* Trap. */
2082 gen_trap(s);
2084 gen_set_label(lab);
2085 return NO_EXIT;
2088 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): sync PSW address and CC, then hand the
   function code (i2) and register numbers to the diag helper. */
2089 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2091 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2092 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2093 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2095 check_privileged(s);
2096 update_psw_addr(s);
2097 gen_op_calc_cc(s);
2099 gen_helper_diag(cpu_env, r1, r3, func_code);
2101 tcg_temp_free_i32(func_code);
2102 tcg_temp_free_i32(r3);
2103 tcg_temp_free_i32(r1);
2104 return NO_EXIT;
2106 #endif
/* Fixed-point divides, delegated to helpers; the second half of the
   quotient/remainder pair is retrieved with return_low128. */
2110 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2110 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2111 return_low128(o->out);
2112 return NO_EXIT;
2115 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2117 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2118 return_low128(o->out);
2119 return NO_EXIT;
2122 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2124 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2125 return_low128(o->out);
2126 return NO_EXIT;
/* 128/64 unsigned divide: dividend is the out/out2 pair. */
2129 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2131 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2132 return_low128(o->out);
2133 return NO_EXIT;
/* BFP divides (short/long/extended), via helpers. */
2136 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2138 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2139 return NO_EXIT;
2142 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2144 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2145 return NO_EXIT;
2148 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2150 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2151 return_low128(o->out2);
2152 return NO_EXIT;
/* EXTRACT ACCESS: read access register r2 into out. */
2157 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2157 int r2 = get_field(s->fields, r2);
2158 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2159 return NO_EXIT;
/* EXTRACT CACHE ATTRIBUTE: we model no cache, so return all-ones. */
2162 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2164 /* No cache information provided. */
2165 tcg_gen_movi_i64(o->out, -1);
2166 return NO_EXIT;
/* EXTRACT FPC: read the floating-point control register. */
2169 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2171 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2172 return NO_EXIT;
/* EXTRACT PSW: high 32 bits of psw_mask into r1, low half into r2
   (when r2 != 0). */
2175 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2177 int r1 = get_field(s->fields, r1);
2178 int r2 = get_field(s->fields, r2);
2179 TCGv_i64 t = tcg_temp_new_i64();
2181 /* Note the "subsequently" in the PoO, which implies a defined result
2182 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2183 tcg_gen_shri_i64(t, psw_mask, 32);
2184 store_reg32_i64(r1, t);
2185 if (r2 != 0) {
2186 store_reg32_i64(r2, psw_mask);
2189 tcg_temp_free_i64(t);
2190 return NO_EXIT;
/* EXECUTE: run the target instruction with its second byte OR'd with
   bits from r1 (helper does the fetch/modify).  A nested EXECUTE
   (ex_value already set) raises a program exception. */
2195 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2195 int r1 = get_field(s->fields, r1);
2196 TCGv_i32 ilen;
2197 TCGv_i64 v1;
2199 /* Nested EXECUTE is not allowed. */
2200 if (unlikely(s->ex_value)) {
2201 gen_program_exception(s, PGM_EXECUTE);
2202 return EXIT_NORETURN;
2205 update_psw_addr(s);
2206 update_cc_op(s);
2208 if (r1 == 0) {
2209 v1 = tcg_const_i64(0);
2210 } else {
2211 v1 = regs[r1];
2214 ilen = tcg_const_i32(s->ilen);
2215 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2216 tcg_temp_free_i32(ilen);
2218 if (r1 == 0) {
2219 tcg_temp_free_i64(v1);
2222 return EXIT_PC_CC_UPDATED;
/* LOAD FP INTEGER (round BFP to integral value); m3 = rounding mode. */
2227 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2227 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2228 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2229 tcg_temp_free_i32(m3);
2230 return NO_EXIT;
2233 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2235 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2236 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2237 tcg_temp_free_i32(m3);
2238 return NO_EXIT;
/* Extended variant; 128-bit result returned in out/out2. */
2241 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2243 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2244 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2245 return_low128(o->out2);
2246 tcg_temp_free_i32(m3);
2247 return NO_EXIT;
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 for zero input),
   R1+1 = input with the found bit cleared. */
2252 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2252 /* We'll use the original input for cc computation, since we get to
2253 compare that against 0, which ought to be better than comparing
2254 the real output against 64. It also lets cc_dst be a convenient
2255 temporary during our computation. */
2256 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2258 /* R1 = IN ? CLZ(IN) : 64. */
2259 tcg_gen_clzi_i64(o->out, o->in2, 64);
2261 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2262 value by 64, which is undefined. But since the shift is 64 iff the
2263 input is zero, we still get the correct result after and'ing. */
2264 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2265 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2266 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2267 return NO_EXIT;
/* INSERT CHARACTERS UNDER MASK: contiguous masks become one load plus
   one deposit; sparse masks become a byte-by-byte load/insert loop.
   CC is computed from the mask of inserted bits and the result. */
2272 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2272 int m3 = get_field(s->fields, m3);
2273 int pos, len, base = s->insn->data;
2274 TCGv_i64 tmp = tcg_temp_new_i64();
2275 uint64_t ccm;
2277 switch (m3) {
2278 case 0xf:
2279 /* Effectively a 32-bit load. */
2280 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2281 len = 32;
2282 goto one_insert;
2284 case 0xc:
2285 case 0x6:
2286 case 0x3:
2287 /* Effectively a 16-bit load. */
2288 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2289 len = 16;
2290 goto one_insert;
2292 case 0x8:
2293 case 0x4:
2294 case 0x2:
2295 case 0x1:
2296 /* Effectively an 8-bit load. */
2297 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2298 len = 8;
2299 goto one_insert;
2301 one_insert:
2302 pos = base + ctz32(m3) * 8;
2303 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2304 ccm = ((1ull << len) - 1) << pos;
2305 break;
2307 default:
2308 /* This is going to be a sequence of loads and inserts. */
2309 pos = base + 32 - 8;
2310 ccm = 0;
2311 while (m3) {
2312 if (m3 & 0x8) {
2313 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2314 tcg_gen_addi_i64(o->in2, o->in2, 1);
2315 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2316 ccm |= 0xff << pos;
2318 m3 = (m3 << 1) & 0xf;
2319 pos -= 8;
2321 break;
2324 tcg_gen_movi_i64(tmp, ccm);
2325 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2326 tcg_temp_free_i64(tmp);
2327 return NO_EXIT;
/* Insert in2 into a bitfield of in1; insn->data encodes shift (low
   byte) and width (high byte), mirroring op_andi. */
2332 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2332 int shift = s->insn->data & 0xff;
2333 int size = s->insn->data >> 8;
2334 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2335 return NO_EXIT;
/* INSERT PROGRAM MASK: merge the CC (bits 28-31 of out) and the
   program mask extracted from psw_mask into byte 24-31 of out. */
2340 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2340 TCGv_i64 t1;
2342 gen_op_calc_cc(s);
2343 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2345 t1 = tcg_temp_new_i64();
2346 tcg_gen_shli_i64(t1, psw_mask, 20);
2347 tcg_gen_shri_i64(t1, t1, 36);
2348 tcg_gen_or_i64(o->out, o->out, t1);
2350 tcg_gen_extu_i32_i64(t1, cc_op);
2351 tcg_gen_shli_i64(t1, t1, 28);
2352 tcg_gen_or_i64(o->out, o->out, t1);
2353 tcg_temp_free_i64(t1);
2354 return NO_EXIT;
2357 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged), via helper. */
2358 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2360 TCGv_i32 m4;
2362 check_privileged(s);
2363 m4 = tcg_const_i32(get_field(s->fields, m4));
2364 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2365 tcg_temp_free_i32(m4);
2366 return NO_EXIT;
/* INSERT STORAGE KEY EXTENDED (privileged), via helper. */
2369 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2371 check_privileged(s);
2372 gen_helper_iske(o->out, cpu_env, o->in2);
2373 return NO_EXIT;
2375 #endif
/* COMPARE AND SIGNAL (BFP, short/long/extended): helpers set the CC. */
2379 static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2379 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2380 set_cc_static(s);
2381 return NO_EXIT;
2384 static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
2386 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2387 set_cc_static(s);
2388 return NO_EXIT;
2391 static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
2393 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2394 set_cc_static(s);
2395 return NO_EXIT;
/* LOAD AND ADD: atomic fetch-add; the instruction's result register
   gets the old memory value, but CC needs the sum, so redo the add. */
2400 static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2400 /* The real output is indeed the original value in memory;
2401 recompute the addition for the computation of CC. */
2402 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2403 s->insn->data | MO_ALIGN);
2404 /* However, we need to recompute the addition for setting CC. */
2405 tcg_gen_add_i64(o->out, o->in1, o->in2);
2406 return NO_EXIT;
/* LOAD AND AND: atomic fetch-and; the AND is redone to compute CC. */
2409 static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2411 /* The real output is indeed the original value in memory;
2412 recompute the operation for the computation of CC. */
2413 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2414 s->insn->data | MO_ALIGN);
2415 /* However, we need to recompute the operation for setting CC. */
2416 tcg_gen_and_i64(o->out, o->in1, o->in2);
2417 return NO_EXIT;
/* LOAD AND OR: atomic fetch-or; the OR is redone to compute CC. */
2420 static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2422 /* The real output is indeed the original value in memory;
2423 recompute the operation for the computation of CC. */
2424 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2425 s->insn->data | MO_ALIGN);
2426 /* However, we need to recompute the operation for setting CC. */
2427 tcg_gen_or_i64(o->out, o->in1, o->in2);
2428 return NO_EXIT;
/* LOAD AND EXCLUSIVE OR: atomic fetch-xor; XOR redone for CC. */
2431 static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2433 /* The real output is indeed the original value in memory;
2434 recompute the operation for the computation of CC. */
2435 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2436 s->insn->data | MO_ALIGN);
2437 /* However, we need to recompute the operation for setting CC. */
2438 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2439 return NO_EXIT;
/* BFP format conversions (lengthen/round), via helpers. */
2444 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2444 gen_helper_ldeb(o->out, cpu_env, o->in2);
2445 return NO_EXIT;
2448 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2450 gen_helper_ledb(o->out, cpu_env, o->in2);
2451 return NO_EXIT;
2454 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2456 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2457 return NO_EXIT;
2460 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2462 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2463 return NO_EXIT;
/* Lengthen to extended: 128-bit result returned in out/out2. */
2466 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2468 gen_helper_lxdb(o->out, cpu_env, o->in2);
2469 return_low128(o->out2);
2470 return NO_EXIT;
2473 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2475 gen_helper_lxeb(o->out, cpu_env, o->in2);
2476 return_low128(o->out2);
2477 return NO_EXIT;
2480 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2482 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2483 return NO_EXIT;
2486 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2488 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2489 return NO_EXIT;
2492 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2494 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2495 return NO_EXIT;
2498 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2500 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2501 return NO_EXIT;
2504 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2506 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2507 return NO_EXIT;
2510 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2512 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2513 return NO_EXIT;
2516 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2518 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2519 return NO_EXIT;
2522 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2524 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2525 return NO_EXIT;
2528 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2530 TCGLabel *lab = gen_new_label();
2531 store_reg32_i64(get_field(s->fields, r1), o->in2);
2532 /* The value is stored even in case of trap. */
2533 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2534 gen_trap(s);
2535 gen_set_label(lab);
2536 return NO_EXIT;
2539 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2541 TCGLabel *lab = gen_new_label();
2542 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2543 /* The value is stored even in case of trap. */
2544 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2545 gen_trap(s);
2546 gen_set_label(lab);
2547 return NO_EXIT;
2550 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2552 TCGLabel *lab = gen_new_label();
2553 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2554 /* The value is stored even in case of trap. */
2555 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2556 gen_trap(s);
2557 gen_set_label(lab);
2558 return NO_EXIT;
2561 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2563 TCGLabel *lab = gen_new_label();
2564 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2565 /* The value is stored even in case of trap. */
2566 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2567 gen_trap(s);
2568 gen_set_label(lab);
2569 return NO_EXIT;
2572 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2574 TCGLabel *lab = gen_new_label();
2575 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2576 /* The value is stored even in case of trap. */
2577 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2578 gen_trap(s);
2579 gen_set_label(lab);
2580 return NO_EXIT;
2583 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2585 DisasCompare c;
2587 disas_jcc(s, &c, get_field(s->fields, m3));
2589 if (c.is_64) {
2590 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2591 o->in2, o->in1);
2592 free_compare(&c);
2593 } else {
2594 TCGv_i32 t32 = tcg_temp_new_i32();
2595 TCGv_i64 t, z;
2597 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2598 free_compare(&c);
2600 t = tcg_temp_new_i64();
2601 tcg_gen_extu_i32_i64(t, t32);
2602 tcg_temp_free_i32(t32);
2604 z = tcg_const_i64(0);
2605 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2606 tcg_temp_free_i64(t);
2607 tcg_temp_free_i64(z);
2610 return NO_EXIT;
2613 #ifndef CONFIG_USER_ONLY
2614 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2616 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2617 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2618 check_privileged(s);
2619 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2620 tcg_temp_free_i32(r1);
2621 tcg_temp_free_i32(r3);
2622 return NO_EXIT;
2625 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2627 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2628 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2629 check_privileged(s);
2630 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2631 tcg_temp_free_i32(r1);
2632 tcg_temp_free_i32(r3);
2633 return NO_EXIT;
2636 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2638 check_privileged(s);
2639 gen_helper_lra(o->out, cpu_env, o->in2);
2640 set_cc_static(s);
2641 return NO_EXIT;
2644 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2646 check_privileged(s);
2648 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2649 return NO_EXIT;
2652 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2654 TCGv_i64 t1, t2;
2656 check_privileged(s);
2657 per_breaking_event(s);
2659 t1 = tcg_temp_new_i64();
2660 t2 = tcg_temp_new_i64();
2661 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2662 tcg_gen_addi_i64(o->in2, o->in2, 4);
2663 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2664 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2665 tcg_gen_shli_i64(t1, t1, 32);
2666 gen_helper_load_psw(cpu_env, t1, t2);
2667 tcg_temp_free_i64(t1);
2668 tcg_temp_free_i64(t2);
2669 return EXIT_NORETURN;
2672 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2674 TCGv_i64 t1, t2;
2676 check_privileged(s);
2677 per_breaking_event(s);
2679 t1 = tcg_temp_new_i64();
2680 t2 = tcg_temp_new_i64();
2681 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2682 tcg_gen_addi_i64(o->in2, o->in2, 8);
2683 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2684 gen_helper_load_psw(cpu_env, t1, t2);
2685 tcg_temp_free_i64(t1);
2686 tcg_temp_free_i64(t2);
2687 return EXIT_NORETURN;
2689 #endif
2691 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2693 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2694 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2695 gen_helper_lam(cpu_env, r1, o->in2, r3);
2696 tcg_temp_free_i32(r1);
2697 tcg_temp_free_i32(r3);
2698 return NO_EXIT;
2701 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2703 int r1 = get_field(s->fields, r1);
2704 int r3 = get_field(s->fields, r3);
2705 TCGv_i64 t1, t2;
2707 /* Only one register to read. */
2708 t1 = tcg_temp_new_i64();
2709 if (unlikely(r1 == r3)) {
2710 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2711 store_reg32_i64(r1, t1);
2712 tcg_temp_free(t1);
2713 return NO_EXIT;
2716 /* First load the values of the first and last registers to trigger
2717 possible page faults. */
2718 t2 = tcg_temp_new_i64();
2719 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2720 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2721 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2722 store_reg32_i64(r1, t1);
2723 store_reg32_i64(r3, t2);
2725 /* Only two registers to read. */
2726 if (((r1 + 1) & 15) == r3) {
2727 tcg_temp_free(t2);
2728 tcg_temp_free(t1);
2729 return NO_EXIT;
2732 /* Then load the remaining registers. Page fault can't occur. */
2733 r3 = (r3 - 1) & 15;
2734 tcg_gen_movi_i64(t2, 4);
2735 while (r1 != r3) {
2736 r1 = (r1 + 1) & 15;
2737 tcg_gen_add_i64(o->in2, o->in2, t2);
2738 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2739 store_reg32_i64(r1, t1);
2741 tcg_temp_free(t2);
2742 tcg_temp_free(t1);
2744 return NO_EXIT;
2747 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2749 int r1 = get_field(s->fields, r1);
2750 int r3 = get_field(s->fields, r3);
2751 TCGv_i64 t1, t2;
2753 /* Only one register to read. */
2754 t1 = tcg_temp_new_i64();
2755 if (unlikely(r1 == r3)) {
2756 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2757 store_reg32h_i64(r1, t1);
2758 tcg_temp_free(t1);
2759 return NO_EXIT;
2762 /* First load the values of the first and last registers to trigger
2763 possible page faults. */
2764 t2 = tcg_temp_new_i64();
2765 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2766 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2767 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2768 store_reg32h_i64(r1, t1);
2769 store_reg32h_i64(r3, t2);
2771 /* Only two registers to read. */
2772 if (((r1 + 1) & 15) == r3) {
2773 tcg_temp_free(t2);
2774 tcg_temp_free(t1);
2775 return NO_EXIT;
2778 /* Then load the remaining registers. Page fault can't occur. */
2779 r3 = (r3 - 1) & 15;
2780 tcg_gen_movi_i64(t2, 4);
2781 while (r1 != r3) {
2782 r1 = (r1 + 1) & 15;
2783 tcg_gen_add_i64(o->in2, o->in2, t2);
2784 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2785 store_reg32h_i64(r1, t1);
2787 tcg_temp_free(t2);
2788 tcg_temp_free(t1);
2790 return NO_EXIT;
2793 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2795 int r1 = get_field(s->fields, r1);
2796 int r3 = get_field(s->fields, r3);
2797 TCGv_i64 t1, t2;
2799 /* Only one register to read. */
2800 if (unlikely(r1 == r3)) {
2801 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2802 return NO_EXIT;
2805 /* First load the values of the first and last registers to trigger
2806 possible page faults. */
2807 t1 = tcg_temp_new_i64();
2808 t2 = tcg_temp_new_i64();
2809 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2810 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2811 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2812 tcg_gen_mov_i64(regs[r1], t1);
2813 tcg_temp_free(t2);
2815 /* Only two registers to read. */
2816 if (((r1 + 1) & 15) == r3) {
2817 tcg_temp_free(t1);
2818 return NO_EXIT;
2821 /* Then load the remaining registers. Page fault can't occur. */
2822 r3 = (r3 - 1) & 15;
2823 tcg_gen_movi_i64(t1, 8);
2824 while (r1 != r3) {
2825 r1 = (r1 + 1) & 15;
2826 tcg_gen_add_i64(o->in2, o->in2, t1);
2827 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2829 tcg_temp_free(t1);
2831 return NO_EXIT;
2834 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2836 TCGv_i64 a1, a2;
2837 TCGMemOp mop = s->insn->data;
2839 /* In a parallel context, stop the world and single step. */
2840 if (parallel_cpus) {
2841 potential_page_fault(s);
2842 gen_exception(EXCP_ATOMIC);
2843 return EXIT_NORETURN;
2846 /* In a serial context, perform the two loads ... */
2847 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2848 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2849 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2850 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2851 tcg_temp_free_i64(a1);
2852 tcg_temp_free_i64(a2);
2854 /* ... and indicate that we performed them while interlocked. */
2855 gen_op_movi_cc(s, 0);
2856 return NO_EXIT;
2859 #ifndef CONFIG_USER_ONLY
2860 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2862 check_privileged(s);
2863 potential_page_fault(s);
2864 gen_helper_lura(o->out, cpu_env, o->in2);
2865 return NO_EXIT;
2868 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2870 check_privileged(s);
2871 potential_page_fault(s);
2872 gen_helper_lurag(o->out, cpu_env, o->in2);
2873 return NO_EXIT;
2875 #endif
2877 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2879 o->out = o->in2;
2880 o->g_out = o->g_in2;
2881 TCGV_UNUSED_I64(o->in2);
2882 o->g_in2 = false;
2883 return NO_EXIT;
2886 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2888 int b2 = get_field(s->fields, b2);
2889 TCGv ar1 = tcg_temp_new_i64();
2891 o->out = o->in2;
2892 o->g_out = o->g_in2;
2893 TCGV_UNUSED_I64(o->in2);
2894 o->g_in2 = false;
2896 switch (s->tb->flags & FLAG_MASK_ASC) {
2897 case PSW_ASC_PRIMARY >> 32:
2898 tcg_gen_movi_i64(ar1, 0);
2899 break;
2900 case PSW_ASC_ACCREG >> 32:
2901 tcg_gen_movi_i64(ar1, 1);
2902 break;
2903 case PSW_ASC_SECONDARY >> 32:
2904 if (b2) {
2905 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2906 } else {
2907 tcg_gen_movi_i64(ar1, 0);
2909 break;
2910 case PSW_ASC_HOME >> 32:
2911 tcg_gen_movi_i64(ar1, 2);
2912 break;
2915 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2916 tcg_temp_free_i64(ar1);
2918 return NO_EXIT;
2921 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2923 o->out = o->in1;
2924 o->out2 = o->in2;
2925 o->g_out = o->g_in1;
2926 o->g_out2 = o->g_in2;
2927 TCGV_UNUSED_I64(o->in1);
2928 TCGV_UNUSED_I64(o->in2);
2929 o->g_in1 = o->g_in2 = false;
2930 return NO_EXIT;
2933 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2935 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2936 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2937 tcg_temp_free_i32(l);
2938 return NO_EXIT;
2941 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
2943 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2944 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
2945 tcg_temp_free_i32(l);
2946 return NO_EXIT;
2949 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2951 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2952 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2953 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2954 tcg_temp_free_i32(r1);
2955 tcg_temp_free_i32(r2);
2956 set_cc_static(s);
2957 return NO_EXIT;
2960 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2962 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2963 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2964 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2965 tcg_temp_free_i32(r1);
2966 tcg_temp_free_i32(r3);
2967 set_cc_static(s);
2968 return NO_EXIT;
2971 #ifndef CONFIG_USER_ONLY
2972 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2974 int r1 = get_field(s->fields, l1);
2975 check_privileged(s);
2976 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2977 set_cc_static(s);
2978 return NO_EXIT;
2981 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2983 int r1 = get_field(s->fields, l1);
2984 check_privileged(s);
2985 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2986 set_cc_static(s);
2987 return NO_EXIT;
2989 #endif
2991 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2993 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
2994 set_cc_static(s);
2995 return NO_EXIT;
2998 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3000 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3001 set_cc_static(s);
3002 return_low128(o->in2);
3003 return NO_EXIT;
3006 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3008 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3009 return NO_EXIT;
3012 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3014 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3015 return NO_EXIT;
3018 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3020 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3021 return NO_EXIT;
3024 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3026 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3027 return NO_EXIT;
3030 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3032 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3033 return NO_EXIT;
3036 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3038 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3039 return_low128(o->out2);
3040 return NO_EXIT;
3043 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3045 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3046 return_low128(o->out2);
3047 return NO_EXIT;
3050 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3052 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3053 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3054 tcg_temp_free_i64(r3);
3055 return NO_EXIT;
3058 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3060 int r3 = get_field(s->fields, r3);
3061 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3062 return NO_EXIT;
3065 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3067 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3068 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3069 tcg_temp_free_i64(r3);
3070 return NO_EXIT;
3073 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3075 int r3 = get_field(s->fields, r3);
3076 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3077 return NO_EXIT;
3080 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3082 TCGv_i64 z, n;
3083 z = tcg_const_i64(0);
3084 n = tcg_temp_new_i64();
3085 tcg_gen_neg_i64(n, o->in2);
3086 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3087 tcg_temp_free_i64(n);
3088 tcg_temp_free_i64(z);
3089 return NO_EXIT;
3092 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3094 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3095 return NO_EXIT;
3098 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3100 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3101 return NO_EXIT;
3104 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3106 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3107 tcg_gen_mov_i64(o->out2, o->in2);
3108 return NO_EXIT;
3111 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3113 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3114 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3115 tcg_temp_free_i32(l);
3116 set_cc_static(s);
3117 return NO_EXIT;
3120 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3122 tcg_gen_neg_i64(o->out, o->in2);
3123 return NO_EXIT;
3126 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3128 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3129 return NO_EXIT;
3132 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3134 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3135 return NO_EXIT;
3138 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3140 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3141 tcg_gen_mov_i64(o->out2, o->in2);
3142 return NO_EXIT;
3145 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3147 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3148 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3149 tcg_temp_free_i32(l);
3150 set_cc_static(s);
3151 return NO_EXIT;
3154 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3156 tcg_gen_or_i64(o->out, o->in1, o->in2);
3157 return NO_EXIT;
3160 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3162 int shift = s->insn->data & 0xff;
3163 int size = s->insn->data >> 8;
3164 uint64_t mask = ((1ull << size) - 1) << shift;
3166 assert(!o->g_in2);
3167 tcg_gen_shli_i64(o->in2, o->in2, shift);
3168 tcg_gen_or_i64(o->out, o->in1, o->in2);
3170 /* Produce the CC from only the bits manipulated. */
3171 tcg_gen_andi_i64(cc_dst, o->out, mask);
3172 set_cc_nz_u64(s, cc_dst);
3173 return NO_EXIT;
3176 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3178 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3179 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3180 tcg_temp_free_i32(l);
3181 return NO_EXIT;
3184 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3186 gen_helper_popcnt(o->out, o->in2);
3187 return NO_EXIT;
3190 #ifndef CONFIG_USER_ONLY
3191 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3193 check_privileged(s);
3194 gen_helper_ptlb(cpu_env);
3195 return NO_EXIT;
3197 #endif
3199 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3201 int i3 = get_field(s->fields, i3);
3202 int i4 = get_field(s->fields, i4);
3203 int i5 = get_field(s->fields, i5);
3204 int do_zero = i4 & 0x80;
3205 uint64_t mask, imask, pmask;
3206 int pos, len, rot;
3208 /* Adjust the arguments for the specific insn. */
3209 switch (s->fields->op2) {
3210 case 0x55: /* risbg */
3211 i3 &= 63;
3212 i4 &= 63;
3213 pmask = ~0;
3214 break;
3215 case 0x5d: /* risbhg */
3216 i3 &= 31;
3217 i4 &= 31;
3218 pmask = 0xffffffff00000000ull;
3219 break;
3220 case 0x51: /* risblg */
3221 i3 &= 31;
3222 i4 &= 31;
3223 pmask = 0x00000000ffffffffull;
3224 break;
3225 default:
3226 abort();
3229 /* MASK is the set of bits to be inserted from R2.
3230 Take care for I3/I4 wraparound. */
3231 mask = pmask >> i3;
3232 if (i3 <= i4) {
3233 mask ^= pmask >> i4 >> 1;
3234 } else {
3235 mask |= ~(pmask >> i4 >> 1);
3237 mask &= pmask;
3239 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3240 insns, we need to keep the other half of the register. */
3241 imask = ~mask | ~pmask;
3242 if (do_zero) {
3243 if (s->fields->op2 == 0x55) {
3244 imask = 0;
3245 } else {
3246 imask = ~pmask;
3250 len = i4 - i3 + 1;
3251 pos = 63 - i4;
3252 rot = i5 & 63;
3253 if (s->fields->op2 == 0x5d) {
3254 pos += 32;
3257 /* In some cases we can implement this with extract. */
3258 if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
3259 tcg_gen_extract_i64(o->out, o->in2, rot, len);
3260 return NO_EXIT;
3263 /* In some cases we can implement this with deposit. */
3264 if (len > 0 && (imask == 0 || ~mask == imask)) {
3265 /* Note that we rotate the bits to be inserted to the lsb, not to
3266 the position as described in the PoO. */
3267 rot = (rot - pos) & 63;
3268 } else {
3269 pos = -1;
3272 /* Rotate the input as necessary. */
3273 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3275 /* Insert the selected bits into the output. */
3276 if (pos >= 0) {
3277 if (imask == 0) {
3278 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3279 } else {
3280 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3282 } else if (imask == 0) {
3283 tcg_gen_andi_i64(o->out, o->in2, mask);
3284 } else {
3285 tcg_gen_andi_i64(o->in2, o->in2, mask);
3286 tcg_gen_andi_i64(o->out, o->out, imask);
3287 tcg_gen_or_i64(o->out, o->out, o->in2);
3289 return NO_EXIT;
3292 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3294 int i3 = get_field(s->fields, i3);
3295 int i4 = get_field(s->fields, i4);
3296 int i5 = get_field(s->fields, i5);
3297 uint64_t mask;
3299 /* If this is a test-only form, arrange to discard the result. */
3300 if (i3 & 0x80) {
3301 o->out = tcg_temp_new_i64();
3302 o->g_out = false;
3305 i3 &= 63;
3306 i4 &= 63;
3307 i5 &= 63;
3309 /* MASK is the set of bits to be operated on from R2.
3310 Take care for I3/I4 wraparound. */
3311 mask = ~0ull >> i3;
3312 if (i3 <= i4) {
3313 mask ^= ~0ull >> i4 >> 1;
3314 } else {
3315 mask |= ~(~0ull >> i4 >> 1);
3318 /* Rotate the input as necessary. */
3319 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3321 /* Operate. */
3322 switch (s->fields->op2) {
3323 case 0x55: /* AND */
3324 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3325 tcg_gen_and_i64(o->out, o->out, o->in2);
3326 break;
3327 case 0x56: /* OR */
3328 tcg_gen_andi_i64(o->in2, o->in2, mask);
3329 tcg_gen_or_i64(o->out, o->out, o->in2);
3330 break;
3331 case 0x57: /* XOR */
3332 tcg_gen_andi_i64(o->in2, o->in2, mask);
3333 tcg_gen_xor_i64(o->out, o->out, o->in2);
3334 break;
3335 default:
3336 abort();
3339 /* Set the CC. */
3340 tcg_gen_andi_i64(cc_dst, o->out, mask);
3341 set_cc_nz_u64(s, cc_dst);
3342 return NO_EXIT;
3345 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3347 tcg_gen_bswap16_i64(o->out, o->in2);
3348 return NO_EXIT;
3351 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3353 tcg_gen_bswap32_i64(o->out, o->in2);
3354 return NO_EXIT;
3357 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3359 tcg_gen_bswap64_i64(o->out, o->in2);
3360 return NO_EXIT;
3363 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3365 TCGv_i32 t1 = tcg_temp_new_i32();
3366 TCGv_i32 t2 = tcg_temp_new_i32();
3367 TCGv_i32 to = tcg_temp_new_i32();
3368 tcg_gen_extrl_i64_i32(t1, o->in1);
3369 tcg_gen_extrl_i64_i32(t2, o->in2);
3370 tcg_gen_rotl_i32(to, t1, t2);
3371 tcg_gen_extu_i32_i64(o->out, to);
3372 tcg_temp_free_i32(t1);
3373 tcg_temp_free_i32(t2);
3374 tcg_temp_free_i32(to);
3375 return NO_EXIT;
3378 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3380 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3381 return NO_EXIT;
3384 #ifndef CONFIG_USER_ONLY
3385 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3387 check_privileged(s);
3388 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3389 set_cc_static(s);
3390 return NO_EXIT;
3393 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3395 check_privileged(s);
3396 gen_helper_sacf(cpu_env, o->in2);
3397 /* Addressing mode has changed, so end the block. */
3398 return EXIT_PC_STALE;
3400 #endif
3402 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3404 int sam = s->insn->data;
3405 TCGv_i64 tsam;
3406 uint64_t mask;
3408 switch (sam) {
3409 case 0:
3410 mask = 0xffffff;
3411 break;
3412 case 1:
3413 mask = 0x7fffffff;
3414 break;
3415 default:
3416 mask = -1;
3417 break;
3420 /* Bizarre but true, we check the address of the current insn for the
3421 specification exception, not the next to be executed. Thus the PoO
3422 documents that Bad Things Happen two bytes before the end. */
3423 if (s->pc & ~mask) {
3424 gen_program_exception(s, PGM_SPECIFICATION);
3425 return EXIT_NORETURN;
3427 s->next_pc &= mask;
3429 tsam = tcg_const_i64(sam);
3430 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3431 tcg_temp_free_i64(tsam);
3433 /* Always exit the TB, since we (may have) changed execution mode. */
3434 return EXIT_PC_STALE;
3437 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3439 int r1 = get_field(s->fields, r1);
3440 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3441 return NO_EXIT;
3444 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3446 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3447 return NO_EXIT;
3450 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3452 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3453 return NO_EXIT;
3456 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3458 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3459 return_low128(o->out2);
3460 return NO_EXIT;
3463 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3465 gen_helper_sqeb(o->out, cpu_env, o->in2);
3466 return NO_EXIT;
3469 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3471 gen_helper_sqdb(o->out, cpu_env, o->in2);
3472 return NO_EXIT;
3475 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3477 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3478 return_low128(o->out2);
3479 return NO_EXIT;
3482 #ifndef CONFIG_USER_ONLY
3483 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3485 check_privileged(s);
3486 potential_page_fault(s);
3487 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3488 set_cc_static(s);
3489 return NO_EXIT;
3492 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3494 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3495 check_privileged(s);
3496 potential_page_fault(s);
3497 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3498 set_cc_static(s);
3499 tcg_temp_free_i32(r1);
3500 return NO_EXIT;
3502 #endif
3504 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3506 DisasCompare c;
3507 TCGv_i64 a;
3508 TCGLabel *lab;
3509 int r1;
3511 disas_jcc(s, &c, get_field(s->fields, m3));
3513 /* We want to store when the condition is fulfilled, so branch
3514 out when it's not */
3515 c.cond = tcg_invert_cond(c.cond);
3517 lab = gen_new_label();
3518 if (c.is_64) {
3519 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3520 } else {
3521 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3523 free_compare(&c);
3525 r1 = get_field(s->fields, r1);
3526 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3527 if (s->insn->data) {
3528 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3529 } else {
3530 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3532 tcg_temp_free_i64(a);
3534 gen_set_label(lab);
3535 return NO_EXIT;
3538 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3540 uint64_t sign = 1ull << s->insn->data;
3541 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3542 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3543 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3544 /* The arithmetic left shift is curious in that it does not affect
3545 the sign bit. Copy that over from the source unchanged. */
3546 tcg_gen_andi_i64(o->out, o->out, ~sign);
3547 tcg_gen_andi_i64(o->in1, o->in1, sign);
3548 tcg_gen_or_i64(o->out, o->out, o->in1);
3549 return NO_EXIT;
3552 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3554 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3555 return NO_EXIT;
3558 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3560 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3561 return NO_EXIT;
3564 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3566 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3567 return NO_EXIT;
3570 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3572 gen_helper_sfpc(cpu_env, o->in2);
3573 return NO_EXIT;
3576 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3578 gen_helper_sfas(cpu_env, o->in2);
3579 return NO_EXIT;
3582 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3584 int b2 = get_field(s->fields, b2);
3585 int d2 = get_field(s->fields, d2);
3586 TCGv_i64 t1 = tcg_temp_new_i64();
3587 TCGv_i64 t2 = tcg_temp_new_i64();
3588 int mask, pos, len;
3590 switch (s->fields->op2) {
3591 case 0x99: /* SRNM */
3592 pos = 0, len = 2;
3593 break;
3594 case 0xb8: /* SRNMB */
3595 pos = 0, len = 3;
3596 break;
3597 case 0xb9: /* SRNMT */
3598 pos = 4, len = 3;
3599 break;
3600 default:
3601 tcg_abort();
3603 mask = (1 << len) - 1;
3605 /* Insert the value into the appropriate field of the FPC. */
3606 if (b2 == 0) {
3607 tcg_gen_movi_i64(t1, d2 & mask);
3608 } else {
3609 tcg_gen_addi_i64(t1, regs[b2], d2);
3610 tcg_gen_andi_i64(t1, t1, mask);
3612 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3613 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3614 tcg_temp_free_i64(t1);
3616 /* Then install the new FPC to set the rounding mode in fpu_status. */
3617 gen_helper_sfpc(cpu_env, t2);
3618 tcg_temp_free_i64(t2);
3619 return NO_EXIT;
3622 #ifndef CONFIG_USER_ONLY
3623 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3625 check_privileged(s);
3626 tcg_gen_shri_i64(o->in2, o->in2, 4);
3627 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3628 return NO_EXIT;
3631 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3633 check_privileged(s);
3634 gen_helper_sske(cpu_env, o->in1, o->in2);
3635 return NO_EXIT;
3638 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3640 check_privileged(s);
3641 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3642 return NO_EXIT;
3645 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3647 check_privileged(s);
3648 /* ??? Surely cpu address != cpu number. In any case the previous
3649 version of this stored more than the required half-word, so it
3650 is unlikely this has ever been tested. */
3651 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3652 return NO_EXIT;
3655 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3657 gen_helper_stck(o->out, cpu_env);
3658 /* ??? We don't implement clock states. */
3659 gen_op_movi_cc(s, 0);
3660 return NO_EXIT;
/* STORE CLOCK EXTENDED: store a 16-byte extended clock value built from
   the 64-bit TOD clock, as two 8-byte stores at the operand address. */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);      /* low 8 bits of clock -> top of 2nd dword */
    tcg_gen_shri_i64(c1, c1, 8);       /* high 56 bits of clock -> 1st dword */
    tcg_gen_ori_i64(c2, c2, 0x10000);  /* guarantee a nonzero tail */
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR.  Privileged. */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR.  Privileged. */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): store control registers r1..r3.  Privileged. */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit): store control registers r1..r3.  Privileged. */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CPU ID: compose the result from cpu_num (low half) and
   machine_type (high half).  Privileged. */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}

/* SET CPU TIMER.  Privileged. */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST (at fixed low-core location).  Privileged. */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STORE CPU TIMER.  Privileged. */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE SYSTEM INFORMATION: function code / selectors come from r0/r1.
   Privileged; may fault on the store, hence potential_page_fault. */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX.  Privileged. */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* Channel-subsystem instructions.  Each is privileged, defers entirely
   to a helper (subchannel id conventionally in r1 where used), and sets
   the condition code from the helper result. */

/* CANCEL SUBCHANNEL. */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL. */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL. */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL: in2 is the SCHIB address. */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH. */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL. */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* START SUBCHANNEL: in2 is the ORB address. */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL: in2 is the SCHIB address. */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL: in2 is the IRB address. */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL: in2 is the command block address. */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX: read the prefix register; the architecture-defined
   reserved bits are masked off before the output hook stores it.
   Privileged. */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN AND/OR SYSTEM MASK (STNSM opcode 0xac, STOSM 0xad): store
   the current system-mask byte, then AND or OR the immediate into PSW
   bits 0-7.  Privileged. */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    /* Opcode 0xac is STNSM (AND); otherwise STOSM (OR).  The immediate
       is shifted into the top byte of the 64-bit PSW mask. */
    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (32-bit).  Privileged; the helper performs
   the real-address store, hence potential_page_fault. */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit).  Privileged. */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
#endif
/* STORE FACILITY LIST EXTENDED: helper stores the facility list at the
   operand address and returns the condition code. */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Plain stores of in1 to the address in in2, at 1/2/4/8-byte widths. */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: helper stores access registers r1..r3. */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK: store the register bytes selected by the
   4-bit mask m3 to successive bytes at the operand address.  insn->data
   supplies the bit offset of the 32-bit field being stored from (0 for
   the low word, 32 for the high-word variants). */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the least-significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;            /* start at the most-significant byte */
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STORE MULTIPLE: store registers r1..r3 (wrapping through r15 -> r0)
   to consecutive memory; insn->data selects 4- or 8-byte elements. */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;     /* register numbers wrap modulo 16 */
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
4025 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4027 int r1 = get_field(s->fields, r1);
4028 int r3 = get_field(s->fields, r3);
4029 TCGv_i64 t = tcg_temp_new_i64();
4030 TCGv_i64 t4 = tcg_const_i64(4);
4031 TCGv_i64 t32 = tcg_const_i64(32);
4033 while (1) {
4034 tcg_gen_shl_i64(t, regs[r1], t32);
4035 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4036 if (r1 == r3) {
4037 break;
4039 tcg_gen_add_i64(o->in2, o->in2, t4);
4040 r1 = (r1 + 1) & 15;
4043 tcg_temp_free_i64(t);
4044 tcg_temp_free_i64(t4);
4045 tcg_temp_free_i64(t32);
4046 return NO_EXIT;
/* SEARCH STRING: helper scans for the byte in r0; returns the match
   address in in1 and the updated scan address via the low128 slot. */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* SUBTRACT: out = in1 - in2.  CC is computed by the cout hook. */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where borrow is
   derived from the previous condition code. */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    /* Materialize the comparison result as a 0/1 value in 'borrow'. */
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the SVC code and instruction length for the
   interrupt handler, then raise the SVC exception.  Never returns to
   the translated stream. */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Commit PSW address and cc state before taking the exception. */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
/* TEST ADDRESSING MODE: report the current mode in the CC — bit 1 for
   64-bit mode, bit 0 for 31-bit mode (both known at translation time). */
static ExitStatus op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return NO_EXIT;
}
/* TEST DATA CLASS (short BFP): in1 is the value, in2 the class mask. */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP). */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): the 128-bit input is passed via
   out/out2 (see in1_x1_o, which deliberately fills those fields). */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY

/* TEST BLOCK: helper validates/clears the 4K block; sets CC.  Privileged. */
static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST PROTECTION: helper reports access permissions in the CC.
   NOTE(review): no check_privileged() here although TPROT is a
   privileged instruction per the Principles of Operation — confirm
   whether the privilege check is enforced elsewhere (e.g. insn flags). */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

#endif
/* TRANSLATE: replace each of l1+1 bytes at addr1 through the table at in2. */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED: helper returns updated first-operand address/length
   as a 128-bit pair (high in out, low via return_low128). */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST: scan l1+1 bytes through the function table at in2. */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST AND SET: atomically exchange the byte at in2 with 0xff; the CC is
   the leftmost (sign) bit of the old byte value. */
static ExitStatus op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);   /* bit 7 of the old byte */
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return NO_EXIT;
}
/* UNPACK: helper converts the packed second operand into zoned format. */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* EXCLUSIVE OR (character): XOR l1+1 bytes at addr2 into addr1.  The
   common idiom "XC x,x" (identical operands) zeroes storage, so that
   case is expanded inline as stores of zero; everything else defers to
   the helper. */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero. */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;    /* l1 encodes length - 1 */
        /* Emit the widest stores possible, advancing the address only
           while bytes remain. */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);   /* XOR with self always yields CC 0 */
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper. */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR (register/memory forms): out = in1 ^ in2. */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE: XOR an immediate field into one slice of the
   register.  insn->data packs the slice: low byte = shift, next byte =
   field size in bits. */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Produce a constant-zero output (e.g. for zeroing instructions). */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a constant-zero 128-bit output; out2 aliases out, and g_out2
   is set so the shared temporary is not freed twice. */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code. */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* CC_OP_NZ examines all 64 bits, so zero-extend the 32-bit result
       into cc_dst first. */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global. */

/* Fresh temporary for a single 64-bit result. */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Fresh temporary pair for a 128-bit result. */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into general register r1 (a TCG global; g_out keeps it
   from being freed). */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write into the even/odd register pair r1/r1+1; r1 must be even. */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into floating-point register f1. */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write into the 128-bit FP register pair f1/f1+2. */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation. */

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1. */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1. */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store into the high 32 bits of r1 (high-word instructions). */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store the 32-bit pair out/out2 into the even/odd pair r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  Note this clobbers o->out with the shifted value. */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store a short (32-bit) FP result into f1. */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store a 128-bit FP result into the pair f1/f1+2.  (Reads the field
   via s->fields rather than f; behaviorally identical.) */
static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int f1 = get_field(s->fields, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Conditional store: only when r1 != r2 (e.g. LOAD AND TEST forms where
   the registers coincide and no copy is needed). */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Memory stores of the result at addr1, at various widths. */

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* 32-bit store at the address in in2 (not addr1). */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Write the (possibly helper-updated) in2 value back to r1. */
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn. */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Use the register global directly; g_in1 prevents it being freed. */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High word of r1, shifted down (for *HIGH instructions). */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair starting at r1. */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value formed from the 32-bit even/odd pair r1 (high) / r1+1 (low). */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Short (32-bit) FP operand from f1. */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* 128-bit FP first operand: deliberately filled into out/out2 rather
   than in1 — helpers taking a 128-bit first operand read it from the
   out pair (see e.g. op_tcxb). */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* First-operand effective address from b1/d1. */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* First-operand address taken from the second-operand fields (x2/b2/d2). */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory loads of the first operand at addr1, at various widths and
   extensions. */

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4855 /* ====================================================================== */
4856 /* The "INput 2" generators. These load the second operand to an insn. */
4858 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4860 o->in2 = regs[get_field(f, r1)];
4861 o->g_in2 = true;
4863 #define SPEC_in2_r1_o 0
4865 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4867 o->in2 = tcg_temp_new_i64();
4868 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4870 #define SPEC_in2_r1_16u 0
4872 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4874 o->in2 = tcg_temp_new_i64();
4875 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4877 #define SPEC_in2_r1_32u 0
4879 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4881 int r1 = get_field(f, r1);
4882 o->in2 = tcg_temp_new_i64();
4883 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4885 #define SPEC_in2_r1_D32 SPEC_r1_even
4887 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4889 o->in2 = load_reg(get_field(f, r2));
4891 #define SPEC_in2_r2 0
4893 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4895 o->in2 = regs[get_field(f, r2)];
4896 o->g_in2 = true;
4898 #define SPEC_in2_r2_o 0
/* Copy of r2, but only when the field is non-zero; r2 == 0 leaves
   o->in2 unset (used by insns where register 0 means "no operand").  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Low 8 bits of r2, sign-extended.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

/* Low 8 bits of r2, zero-extended.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* Low 16 bits of r2, sign-extended.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* Low 16 bits of r2, zero-extended.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0
/* Copy of r3 in a fresh temporary.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* High 32 bits of r3, shifted down into the low half.  */
static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

/* Low 32 bits of r2, sign-extended.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

/* Low 32 bits of r2, zero-extended.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* High 32 bits of r2, shifted down into the low half.  */
static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
/* Short (32-bit) float operand from FP register r2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Long (64-bit) float operand: use the FP global directly (not freed).  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* Extended (128-bit) float operand: the register pair r2/r2+2 supplies
   both halves, filling in1 and in2.  SPEC_r2_f128 restricts r2 to the
   valid pair-capable registers.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Address formed from register r2 alone (no index, no displacement).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0
/* Effective address from the b2/d2 (and optional x2) fields.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword count from this insn.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift count for 32-bit shifts (masked to 0..31 by the helper).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* Shift count for 64-bit shifts (masked to 0..63 by the helper).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
/* The in2_m2_* generators compute the effective address via in2_a2 and
   then replace o->in2 with the value loaded from it, at the width and
   extension the name indicates.  */

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* The in2_mri2_* generators are the same, but the address comes from
   the PC-relative in2_ri2 computation.  */

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
/* Immediate operand, as extracted (already sign-extended by
   extract_field when the field type is signed).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Immediate, zero-extended from 8 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

/* Immediate, zero-extended from 16 bits.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

/* Immediate, zero-extended from 32 bits.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* 16-bit immediate, shifted left by the per-insn data amount
   (used by the insert/load "high half" immediate forms).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* 32-bit immediate, shifted left by the per-insn data amount.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw (left-aligned) instruction image, as saved by extract_insn.  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C() is D() with a zero data field.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: build an enum of insn_<NAME>
   indices into the insn_info[] table below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the DisasInsn initializer for each entry,
   wiring up the helper generators named in the table.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: each table entry becomes a switch case mapping the
   16-bit opcode to its insn_info[] entry.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined (op << 8 | op2) opcode to its table entry, or NULL
   for an opcode not in the table.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size 0 marks an unused field slot in the format description.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        /* Sign-extend via the xor/subtract trick on the top bit.  */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The high 8 bits (dh) were extracted below the low 12 (dl);
           reassemble as dh:dl with dh sign-extended.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* Read the first halfword; its top byte determines the insn
           length, then re-read the full insn left-aligned in 'insn'.  */
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Translate one instruction at s->pc: decode it, run the generator
   pipeline (in1/in2/prep/op/wout/cout) and free the temporaries.
   Returns the ExitStatus that tells the main loop how to continue.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* PER instruction-fetch event tracing.  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers; the g_* flags mark
       globals that must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
/* Translate a basic block starting at tb->pc into TCG ops, one insn at
   a time via translate_one, until an exit condition is reached.  */
void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    /* A non-zero cs_base carries the EXECUTE target insn (see ex_value).  */
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    do {
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc)) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(psw_addr);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory.  */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}
/* Restore CPU state from the values recorded by tcg_gen_insn_start:
   data[0] is the PSW address, data[1] the cc_op.  Dynamic/static cc
   ops are already live in env and must not be overwritten.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->psw.addr = data[0];
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}