target/s390x: fix COMPARE LOGICAL LONG EXTENDED
[qemu/ar7.git] / target / s390x / translate.c
blobecd0a91c0400df1892802304b84d19c8857db70a
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
48 #include "exec/log.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
56 struct DisasContext {
57 struct TranslationBlock *tb;
58 const DisasInsn *insn;
59 DisasFields *fields;
60 uint64_t ex_value;
61 uint64_t pc, next_pc;
62 uint32_t ilen;
63 enum cc_op cc_op;
64 bool singlestep_enabled;
67 /* Information carried about a condition to be evaluated. */
68 typedef struct {
69 TCGCond cond:8;
70 bool is_64;
71 bool g1;
72 bool g2;
73 union {
74 struct { TCGv_i64 a, b; } s64;
75 struct { TCGv_i32 a, b; } s32;
76 } u;
77 } DisasCompare;
79 #define DISAS_EXCP 4
81 #ifdef DEBUG_INLINE_BRANCHES
82 static uint64_t inline_branch_hit[CC_OP_MAX];
83 static uint64_t inline_branch_miss[CC_OP_MAX];
84 #endif
86 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 if (!(s->tb->flags & FLAG_MASK_64)) {
89 if (s->tb->flags & FLAG_MASK_32) {
90 return pc | 0x80000000;
93 return pc;
96 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
97 int flags)
99 S390CPU *cpu = S390_CPU(cs);
100 CPUS390XState *env = &cpu->env;
101 int i;
103 if (env->cc_op > 3) {
104 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
105 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
106 } else {
107 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
108 env->psw.mask, env->psw.addr, env->cc_op);
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
113 if ((i % 4) == 3) {
114 cpu_fprintf(f, "\n");
115 } else {
116 cpu_fprintf(f, " ");
120 for (i = 0; i < 16; i++) {
121 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
122 if ((i % 4) == 3) {
123 cpu_fprintf(f, "\n");
124 } else {
125 cpu_fprintf(f, " ");
129 for (i = 0; i < 32; i++) {
130 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
131 env->vregs[i][0].ll, env->vregs[i][1].ll);
132 cpu_fprintf(f, (i % 2) ? "\n" : " ");
135 #ifndef CONFIG_USER_ONLY
136 for (i = 0; i < 16; i++) {
137 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
138 if ((i % 4) == 3) {
139 cpu_fprintf(f, "\n");
140 } else {
141 cpu_fprintf(f, " ");
144 #endif
146 #ifdef DEBUG_INLINE_BRANCHES
147 for (i = 0; i < CC_OP_MAX; i++) {
148 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
149 inline_branch_miss[i], inline_branch_hit[i]);
151 #endif
153 cpu_fprintf(f, "\n");
156 static TCGv_i64 psw_addr;
157 static TCGv_i64 psw_mask;
158 static TCGv_i64 gbea;
160 static TCGv_i32 cc_op;
161 static TCGv_i64 cc_src;
162 static TCGv_i64 cc_dst;
163 static TCGv_i64 cc_vr;
165 static char cpu_reg_names[32][4];
166 static TCGv_i64 regs[16];
167 static TCGv_i64 fregs[16];
169 void s390x_translate_init(void)
171 int i;
173 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
174 tcg_ctx.tcg_env = cpu_env;
175 psw_addr = tcg_global_mem_new_i64(cpu_env,
176 offsetof(CPUS390XState, psw.addr),
177 "psw_addr");
178 psw_mask = tcg_global_mem_new_i64(cpu_env,
179 offsetof(CPUS390XState, psw.mask),
180 "psw_mask");
181 gbea = tcg_global_mem_new_i64(cpu_env,
182 offsetof(CPUS390XState, gbea),
183 "gbea");
185 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
186 "cc_op");
187 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
188 "cc_src");
189 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
190 "cc_dst");
191 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
192 "cc_vr");
194 for (i = 0; i < 16; i++) {
195 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
196 regs[i] = tcg_global_mem_new(cpu_env,
197 offsetof(CPUS390XState, regs[i]),
198 cpu_reg_names[i]);
201 for (i = 0; i < 16; i++) {
202 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
203 fregs[i] = tcg_global_mem_new(cpu_env,
204 offsetof(CPUS390XState, vregs[i][0].d),
205 cpu_reg_names[i + 16]);
209 static TCGv_i64 load_reg(int reg)
211 TCGv_i64 r = tcg_temp_new_i64();
212 tcg_gen_mov_i64(r, regs[reg]);
213 return r;
216 static TCGv_i64 load_freg32_i64(int reg)
218 TCGv_i64 r = tcg_temp_new_i64();
219 tcg_gen_shri_i64(r, fregs[reg], 32);
220 return r;
223 static void store_reg(int reg, TCGv_i64 v)
225 tcg_gen_mov_i64(regs[reg], v);
228 static void store_freg(int reg, TCGv_i64 v)
230 tcg_gen_mov_i64(fregs[reg], v);
233 static void store_reg32_i64(int reg, TCGv_i64 v)
235 /* 32 bit register writes keep the upper half */
236 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
239 static void store_reg32h_i64(int reg, TCGv_i64 v)
241 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
244 static void store_freg32_i64(int reg, TCGv_i64 v)
246 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
249 static void return_low128(TCGv_i64 dest)
251 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
254 static void update_psw_addr(DisasContext *s)
256 /* psw.addr */
257 tcg_gen_movi_i64(psw_addr, s->pc);
260 static void per_branch(DisasContext *s, bool to_next)
262 #ifndef CONFIG_USER_ONLY
263 tcg_gen_movi_i64(gbea, s->pc);
265 if (s->tb->flags & FLAG_MASK_PER) {
266 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
267 gen_helper_per_branch(cpu_env, gbea, next_pc);
268 if (to_next) {
269 tcg_temp_free_i64(next_pc);
272 #endif
275 static void per_branch_cond(DisasContext *s, TCGCond cond,
276 TCGv_i64 arg1, TCGv_i64 arg2)
278 #ifndef CONFIG_USER_ONLY
279 if (s->tb->flags & FLAG_MASK_PER) {
280 TCGLabel *lab = gen_new_label();
281 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
283 tcg_gen_movi_i64(gbea, s->pc);
284 gen_helper_per_branch(cpu_env, gbea, psw_addr);
286 gen_set_label(lab);
287 } else {
288 TCGv_i64 pc = tcg_const_i64(s->pc);
289 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
290 tcg_temp_free_i64(pc);
292 #endif
295 static void per_breaking_event(DisasContext *s)
297 tcg_gen_movi_i64(gbea, s->pc);
300 static void update_cc_op(DisasContext *s)
302 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
303 tcg_gen_movi_i32(cc_op, s->cc_op);
307 static void potential_page_fault(DisasContext *s)
309 update_psw_addr(s);
310 update_cc_op(s);
313 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
315 return (uint64_t)cpu_lduw_code(env, pc);
318 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
320 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
323 static int get_mem_index(DisasContext *s)
325 switch (s->tb->flags & FLAG_MASK_ASC) {
326 case PSW_ASC_PRIMARY >> 32:
327 return 0;
328 case PSW_ASC_SECONDARY >> 32:
329 return 1;
330 case PSW_ASC_HOME >> 32:
331 return 2;
332 default:
333 tcg_abort();
334 break;
338 static void gen_exception(int excp)
340 TCGv_i32 tmp = tcg_const_i32(excp);
341 gen_helper_exception(cpu_env, tmp);
342 tcg_temp_free_i32(tmp);
345 static void gen_program_exception(DisasContext *s, int code)
347 TCGv_i32 tmp;
349 /* Remember what pgm exeption this was. */
350 tmp = tcg_const_i32(code);
351 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
352 tcg_temp_free_i32(tmp);
354 tmp = tcg_const_i32(s->ilen);
355 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
356 tcg_temp_free_i32(tmp);
358 /* Advance past instruction. */
359 s->pc = s->next_pc;
360 update_psw_addr(s);
362 /* Save off cc. */
363 update_cc_op(s);
365 /* Trigger exception. */
366 gen_exception(EXCP_PGM);
369 static inline void gen_illegal_opcode(DisasContext *s)
371 gen_program_exception(s, PGM_OPERATION);
374 static inline void gen_trap(DisasContext *s)
376 TCGv_i32 t;
378 /* Set DXC to 0xff. */
379 t = tcg_temp_new_i32();
380 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
381 tcg_gen_ori_i32(t, t, 0xff00);
382 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
383 tcg_temp_free_i32(t);
385 gen_program_exception(s, PGM_DATA);
388 #ifndef CONFIG_USER_ONLY
389 static void check_privileged(DisasContext *s)
391 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
392 gen_program_exception(s, PGM_PRIVILEGED);
395 #endif
397 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
399 TCGv_i64 tmp = tcg_temp_new_i64();
400 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
402 /* Note that d2 is limited to 20 bits, signed. If we crop negative
403 displacements early we create larger immedate addends. */
405 /* Note that addi optimizes the imm==0 case. */
406 if (b2 && x2) {
407 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
408 tcg_gen_addi_i64(tmp, tmp, d2);
409 } else if (b2) {
410 tcg_gen_addi_i64(tmp, regs[b2], d2);
411 } else if (x2) {
412 tcg_gen_addi_i64(tmp, regs[x2], d2);
413 } else {
414 if (need_31) {
415 d2 &= 0x7fffffff;
416 need_31 = false;
418 tcg_gen_movi_i64(tmp, d2);
420 if (need_31) {
421 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
424 return tmp;
427 static inline bool live_cc_data(DisasContext *s)
429 return (s->cc_op != CC_OP_DYNAMIC
430 && s->cc_op != CC_OP_STATIC
431 && s->cc_op > 3);
434 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
436 if (live_cc_data(s)) {
437 tcg_gen_discard_i64(cc_src);
438 tcg_gen_discard_i64(cc_dst);
439 tcg_gen_discard_i64(cc_vr);
441 s->cc_op = CC_OP_CONST0 + val;
444 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
446 if (live_cc_data(s)) {
447 tcg_gen_discard_i64(cc_src);
448 tcg_gen_discard_i64(cc_vr);
450 tcg_gen_mov_i64(cc_dst, dst);
451 s->cc_op = op;
454 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
455 TCGv_i64 dst)
457 if (live_cc_data(s)) {
458 tcg_gen_discard_i64(cc_vr);
460 tcg_gen_mov_i64(cc_src, src);
461 tcg_gen_mov_i64(cc_dst, dst);
462 s->cc_op = op;
465 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
466 TCGv_i64 dst, TCGv_i64 vr)
468 tcg_gen_mov_i64(cc_src, src);
469 tcg_gen_mov_i64(cc_dst, dst);
470 tcg_gen_mov_i64(cc_vr, vr);
471 s->cc_op = op;
474 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
476 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
479 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
481 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
484 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
486 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
489 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
491 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
494 /* CC value is in env->cc_op */
495 static void set_cc_static(DisasContext *s)
497 if (live_cc_data(s)) {
498 tcg_gen_discard_i64(cc_src);
499 tcg_gen_discard_i64(cc_dst);
500 tcg_gen_discard_i64(cc_vr);
502 s->cc_op = CC_OP_STATIC;
505 /* calculates cc into cc_op */
506 static void gen_op_calc_cc(DisasContext *s)
508 TCGv_i32 local_cc_op;
509 TCGv_i64 dummy;
511 TCGV_UNUSED_I32(local_cc_op);
512 TCGV_UNUSED_I64(dummy);
513 switch (s->cc_op) {
514 default:
515 dummy = tcg_const_i64(0);
516 /* FALLTHRU */
517 case CC_OP_ADD_64:
518 case CC_OP_ADDU_64:
519 case CC_OP_ADDC_64:
520 case CC_OP_SUB_64:
521 case CC_OP_SUBU_64:
522 case CC_OP_SUBB_64:
523 case CC_OP_ADD_32:
524 case CC_OP_ADDU_32:
525 case CC_OP_ADDC_32:
526 case CC_OP_SUB_32:
527 case CC_OP_SUBU_32:
528 case CC_OP_SUBB_32:
529 local_cc_op = tcg_const_i32(s->cc_op);
530 break;
531 case CC_OP_CONST0:
532 case CC_OP_CONST1:
533 case CC_OP_CONST2:
534 case CC_OP_CONST3:
535 case CC_OP_STATIC:
536 case CC_OP_DYNAMIC:
537 break;
540 switch (s->cc_op) {
541 case CC_OP_CONST0:
542 case CC_OP_CONST1:
543 case CC_OP_CONST2:
544 case CC_OP_CONST3:
545 /* s->cc_op is the cc value */
546 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
547 break;
548 case CC_OP_STATIC:
549 /* env->cc_op already is the cc value */
550 break;
551 case CC_OP_NZ:
552 case CC_OP_ABS_64:
553 case CC_OP_NABS_64:
554 case CC_OP_ABS_32:
555 case CC_OP_NABS_32:
556 case CC_OP_LTGT0_32:
557 case CC_OP_LTGT0_64:
558 case CC_OP_COMP_32:
559 case CC_OP_COMP_64:
560 case CC_OP_NZ_F32:
561 case CC_OP_NZ_F64:
562 case CC_OP_FLOGR:
563 /* 1 argument */
564 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
565 break;
566 case CC_OP_ICM:
567 case CC_OP_LTGT_32:
568 case CC_OP_LTGT_64:
569 case CC_OP_LTUGTU_32:
570 case CC_OP_LTUGTU_64:
571 case CC_OP_TM_32:
572 case CC_OP_TM_64:
573 case CC_OP_SLA_32:
574 case CC_OP_SLA_64:
575 case CC_OP_NZ_F128:
576 /* 2 arguments */
577 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
578 break;
579 case CC_OP_ADD_64:
580 case CC_OP_ADDU_64:
581 case CC_OP_ADDC_64:
582 case CC_OP_SUB_64:
583 case CC_OP_SUBU_64:
584 case CC_OP_SUBB_64:
585 case CC_OP_ADD_32:
586 case CC_OP_ADDU_32:
587 case CC_OP_ADDC_32:
588 case CC_OP_SUB_32:
589 case CC_OP_SUBU_32:
590 case CC_OP_SUBB_32:
591 /* 3 arguments */
592 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
593 break;
594 case CC_OP_DYNAMIC:
595 /* unknown operation - assume 3 arguments and cc_op in env */
596 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
597 break;
598 default:
599 tcg_abort();
602 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
603 tcg_temp_free_i32(local_cc_op);
605 if (!TCGV_IS_UNUSED_I64(dummy)) {
606 tcg_temp_free_i64(dummy);
609 /* We now have cc in cc_op as constant */
610 set_cc_static(s);
613 static bool use_exit_tb(DisasContext *s)
615 return (s->singlestep_enabled ||
616 (s->tb->cflags & CF_LAST_IO) ||
617 (s->tb->flags & FLAG_MASK_PER));
620 static bool use_goto_tb(DisasContext *s, uint64_t dest)
622 if (unlikely(use_exit_tb(s))) {
623 return false;
625 #ifndef CONFIG_USER_ONLY
626 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
627 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
628 #else
629 return true;
630 #endif
633 static void account_noninline_branch(DisasContext *s, int cc_op)
635 #ifdef DEBUG_INLINE_BRANCHES
636 inline_branch_miss[cc_op]++;
637 #endif
640 static void account_inline_branch(DisasContext *s, int cc_op)
642 #ifdef DEBUG_INLINE_BRANCHES
643 inline_branch_hit[cc_op]++;
644 #endif
647 /* Table of mask values to comparison codes, given a comparison as input.
648 For such, CC=3 should not be possible. */
649 static const TCGCond ltgt_cond[16] = {
650 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
651 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
652 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
653 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
654 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
655 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
656 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
657 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
660 /* Table of mask values to comparison codes, given a logic op as input.
661 For such, only CC=0 and CC=1 should be possible. */
662 static const TCGCond nz_cond[16] = {
663 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
664 TCG_COND_NEVER, TCG_COND_NEVER,
665 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
666 TCG_COND_NE, TCG_COND_NE,
667 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
668 TCG_COND_EQ, TCG_COND_EQ,
669 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
670 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
673 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
674 details required to generate a TCG comparison. */
675 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
677 TCGCond cond;
678 enum cc_op old_cc_op = s->cc_op;
680 if (mask == 15 || mask == 0) {
681 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
682 c->u.s32.a = cc_op;
683 c->u.s32.b = cc_op;
684 c->g1 = c->g2 = true;
685 c->is_64 = false;
686 return;
689 /* Find the TCG condition for the mask + cc op. */
690 switch (old_cc_op) {
691 case CC_OP_LTGT0_32:
692 case CC_OP_LTGT0_64:
693 case CC_OP_LTGT_32:
694 case CC_OP_LTGT_64:
695 cond = ltgt_cond[mask];
696 if (cond == TCG_COND_NEVER) {
697 goto do_dynamic;
699 account_inline_branch(s, old_cc_op);
700 break;
702 case CC_OP_LTUGTU_32:
703 case CC_OP_LTUGTU_64:
704 cond = tcg_unsigned_cond(ltgt_cond[mask]);
705 if (cond == TCG_COND_NEVER) {
706 goto do_dynamic;
708 account_inline_branch(s, old_cc_op);
709 break;
711 case CC_OP_NZ:
712 cond = nz_cond[mask];
713 if (cond == TCG_COND_NEVER) {
714 goto do_dynamic;
716 account_inline_branch(s, old_cc_op);
717 break;
719 case CC_OP_TM_32:
720 case CC_OP_TM_64:
721 switch (mask) {
722 case 8:
723 cond = TCG_COND_EQ;
724 break;
725 case 4 | 2 | 1:
726 cond = TCG_COND_NE;
727 break;
728 default:
729 goto do_dynamic;
731 account_inline_branch(s, old_cc_op);
732 break;
734 case CC_OP_ICM:
735 switch (mask) {
736 case 8:
737 cond = TCG_COND_EQ;
738 break;
739 case 4 | 2 | 1:
740 case 4 | 2:
741 cond = TCG_COND_NE;
742 break;
743 default:
744 goto do_dynamic;
746 account_inline_branch(s, old_cc_op);
747 break;
749 case CC_OP_FLOGR:
750 switch (mask & 0xa) {
751 case 8: /* src == 0 -> no one bit found */
752 cond = TCG_COND_EQ;
753 break;
754 case 2: /* src != 0 -> one bit found */
755 cond = TCG_COND_NE;
756 break;
757 default:
758 goto do_dynamic;
760 account_inline_branch(s, old_cc_op);
761 break;
763 case CC_OP_ADDU_32:
764 case CC_OP_ADDU_64:
765 switch (mask) {
766 case 8 | 2: /* vr == 0 */
767 cond = TCG_COND_EQ;
768 break;
769 case 4 | 1: /* vr != 0 */
770 cond = TCG_COND_NE;
771 break;
772 case 8 | 4: /* no carry -> vr >= src */
773 cond = TCG_COND_GEU;
774 break;
775 case 2 | 1: /* carry -> vr < src */
776 cond = TCG_COND_LTU;
777 break;
778 default:
779 goto do_dynamic;
781 account_inline_branch(s, old_cc_op);
782 break;
784 case CC_OP_SUBU_32:
785 case CC_OP_SUBU_64:
786 /* Note that CC=0 is impossible; treat it as dont-care. */
787 switch (mask & 7) {
788 case 2: /* zero -> op1 == op2 */
789 cond = TCG_COND_EQ;
790 break;
791 case 4 | 1: /* !zero -> op1 != op2 */
792 cond = TCG_COND_NE;
793 break;
794 case 4: /* borrow (!carry) -> op1 < op2 */
795 cond = TCG_COND_LTU;
796 break;
797 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
798 cond = TCG_COND_GEU;
799 break;
800 default:
801 goto do_dynamic;
803 account_inline_branch(s, old_cc_op);
804 break;
806 default:
807 do_dynamic:
808 /* Calculate cc value. */
809 gen_op_calc_cc(s);
810 /* FALLTHRU */
812 case CC_OP_STATIC:
813 /* Jump based on CC. We'll load up the real cond below;
814 the assignment here merely avoids a compiler warning. */
815 account_noninline_branch(s, old_cc_op);
816 old_cc_op = CC_OP_STATIC;
817 cond = TCG_COND_NEVER;
818 break;
821 /* Load up the arguments of the comparison. */
822 c->is_64 = true;
823 c->g1 = c->g2 = false;
824 switch (old_cc_op) {
825 case CC_OP_LTGT0_32:
826 c->is_64 = false;
827 c->u.s32.a = tcg_temp_new_i32();
828 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
829 c->u.s32.b = tcg_const_i32(0);
830 break;
831 case CC_OP_LTGT_32:
832 case CC_OP_LTUGTU_32:
833 case CC_OP_SUBU_32:
834 c->is_64 = false;
835 c->u.s32.a = tcg_temp_new_i32();
836 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
837 c->u.s32.b = tcg_temp_new_i32();
838 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
839 break;
841 case CC_OP_LTGT0_64:
842 case CC_OP_NZ:
843 case CC_OP_FLOGR:
844 c->u.s64.a = cc_dst;
845 c->u.s64.b = tcg_const_i64(0);
846 c->g1 = true;
847 break;
848 case CC_OP_LTGT_64:
849 case CC_OP_LTUGTU_64:
850 case CC_OP_SUBU_64:
851 c->u.s64.a = cc_src;
852 c->u.s64.b = cc_dst;
853 c->g1 = c->g2 = true;
854 break;
856 case CC_OP_TM_32:
857 case CC_OP_TM_64:
858 case CC_OP_ICM:
859 c->u.s64.a = tcg_temp_new_i64();
860 c->u.s64.b = tcg_const_i64(0);
861 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
862 break;
864 case CC_OP_ADDU_32:
865 c->is_64 = false;
866 c->u.s32.a = tcg_temp_new_i32();
867 c->u.s32.b = tcg_temp_new_i32();
868 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
869 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
870 tcg_gen_movi_i32(c->u.s32.b, 0);
871 } else {
872 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
874 break;
876 case CC_OP_ADDU_64:
877 c->u.s64.a = cc_vr;
878 c->g1 = true;
879 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
880 c->u.s64.b = tcg_const_i64(0);
881 } else {
882 c->u.s64.b = cc_src;
883 c->g2 = true;
885 break;
887 case CC_OP_STATIC:
888 c->is_64 = false;
889 c->u.s32.a = cc_op;
890 c->g1 = true;
891 switch (mask) {
892 case 0x8 | 0x4 | 0x2: /* cc != 3 */
893 cond = TCG_COND_NE;
894 c->u.s32.b = tcg_const_i32(3);
895 break;
896 case 0x8 | 0x4 | 0x1: /* cc != 2 */
897 cond = TCG_COND_NE;
898 c->u.s32.b = tcg_const_i32(2);
899 break;
900 case 0x8 | 0x2 | 0x1: /* cc != 1 */
901 cond = TCG_COND_NE;
902 c->u.s32.b = tcg_const_i32(1);
903 break;
904 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
905 cond = TCG_COND_EQ;
906 c->g1 = false;
907 c->u.s32.a = tcg_temp_new_i32();
908 c->u.s32.b = tcg_const_i32(0);
909 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
910 break;
911 case 0x8 | 0x4: /* cc < 2 */
912 cond = TCG_COND_LTU;
913 c->u.s32.b = tcg_const_i32(2);
914 break;
915 case 0x8: /* cc == 0 */
916 cond = TCG_COND_EQ;
917 c->u.s32.b = tcg_const_i32(0);
918 break;
919 case 0x4 | 0x2 | 0x1: /* cc != 0 */
920 cond = TCG_COND_NE;
921 c->u.s32.b = tcg_const_i32(0);
922 break;
923 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
924 cond = TCG_COND_NE;
925 c->g1 = false;
926 c->u.s32.a = tcg_temp_new_i32();
927 c->u.s32.b = tcg_const_i32(0);
928 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
929 break;
930 case 0x4: /* cc == 1 */
931 cond = TCG_COND_EQ;
932 c->u.s32.b = tcg_const_i32(1);
933 break;
934 case 0x2 | 0x1: /* cc > 1 */
935 cond = TCG_COND_GTU;
936 c->u.s32.b = tcg_const_i32(1);
937 break;
938 case 0x2: /* cc == 2 */
939 cond = TCG_COND_EQ;
940 c->u.s32.b = tcg_const_i32(2);
941 break;
942 case 0x1: /* cc == 3 */
943 cond = TCG_COND_EQ;
944 c->u.s32.b = tcg_const_i32(3);
945 break;
946 default:
947 /* CC is masked by something else: (8 >> cc) & mask. */
948 cond = TCG_COND_NE;
949 c->g1 = false;
950 c->u.s32.a = tcg_const_i32(8);
951 c->u.s32.b = tcg_const_i32(0);
952 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
953 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
954 break;
956 break;
958 default:
959 abort();
961 c->cond = cond;
964 static void free_compare(DisasCompare *c)
966 if (!c->g1) {
967 if (c->is_64) {
968 tcg_temp_free_i64(c->u.s64.a);
969 } else {
970 tcg_temp_free_i32(c->u.s32.a);
973 if (!c->g2) {
974 if (c->is_64) {
975 tcg_temp_free_i64(c->u.s64.b);
976 } else {
977 tcg_temp_free_i32(c->u.s32.b);
982 /* ====================================================================== */
983 /* Define the insn format enumeration. */
984 #define F0(N) FMT_##N,
985 #define F1(N, X1) F0(N)
986 #define F2(N, X1, X2) F0(N)
987 #define F3(N, X1, X2, X3) F0(N)
988 #define F4(N, X1, X2, X3, X4) F0(N)
989 #define F5(N, X1, X2, X3, X4, X5) F0(N)
991 typedef enum {
992 #include "insn-format.def"
993 } DisasFormat;
995 #undef F0
996 #undef F1
997 #undef F2
998 #undef F3
999 #undef F4
1000 #undef F5
1002 /* Define a structure to hold the decoded fields. We'll store each inside
1003 an array indexed by an enum. In order to conserve memory, we'll arrange
1004 for fields that do not exist at the same time to overlap, thus the "C"
1005 for compact. For checking purposes there is an "O" for original index
1006 as well that will be applied to availability bitmaps. */
1008 enum DisasFieldIndexO {
1009 FLD_O_r1,
1010 FLD_O_r2,
1011 FLD_O_r3,
1012 FLD_O_m1,
1013 FLD_O_m3,
1014 FLD_O_m4,
1015 FLD_O_b1,
1016 FLD_O_b2,
1017 FLD_O_b4,
1018 FLD_O_d1,
1019 FLD_O_d2,
1020 FLD_O_d4,
1021 FLD_O_x2,
1022 FLD_O_l1,
1023 FLD_O_l2,
1024 FLD_O_i1,
1025 FLD_O_i2,
1026 FLD_O_i3,
1027 FLD_O_i4,
1028 FLD_O_i5
1031 enum DisasFieldIndexC {
1032 FLD_C_r1 = 0,
1033 FLD_C_m1 = 0,
1034 FLD_C_b1 = 0,
1035 FLD_C_i1 = 0,
1037 FLD_C_r2 = 1,
1038 FLD_C_b2 = 1,
1039 FLD_C_i2 = 1,
1041 FLD_C_r3 = 2,
1042 FLD_C_m3 = 2,
1043 FLD_C_i3 = 2,
1045 FLD_C_m4 = 3,
1046 FLD_C_b4 = 3,
1047 FLD_C_i4 = 3,
1048 FLD_C_l1 = 3,
1050 FLD_C_i5 = 4,
1051 FLD_C_d1 = 4,
1053 FLD_C_d2 = 5,
1055 FLD_C_d4 = 6,
1056 FLD_C_x2 = 6,
1057 FLD_C_l2 = 6,
1059 NUM_C_FIELD = 7
1062 struct DisasFields {
1063 uint64_t raw_insn;
1064 unsigned op:8;
1065 unsigned op2:8;
1066 unsigned presentC:16;
1067 unsigned int presentO;
1068 int c[NUM_C_FIELD];
1071 /* This is the way fields are to be accessed out of DisasFields. */
1072 #define have_field(S, F) have_field1((S), FLD_O_##F)
1073 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1075 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1077 return (f->presentO >> c) & 1;
1080 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1081 enum DisasFieldIndexC c)
1083 assert(have_field1(f, o));
1084 return f->c[c];
1087 /* Describe the layout of each field in each format. */
1088 typedef struct DisasField {
1089 unsigned int beg:8;
1090 unsigned int size:8;
1091 unsigned int type:2;
1092 unsigned int indexC:6;
1093 enum DisasFieldIndexO indexO:8;
1094 } DisasField;
1096 typedef struct DisasFormatInfo {
1097 DisasField op[NUM_C_FIELD];
1098 } DisasFormatInfo;
1100 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1101 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1102 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1104 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1105 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1106 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1107 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1108 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1109 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1110 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1111 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1112 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1113 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1115 #define F0(N) { { } },
1116 #define F1(N, X1) { { X1 } },
1117 #define F2(N, X1, X2) { { X1, X2 } },
1118 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1119 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1120 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1122 static const DisasFormatInfo format_info[] = {
1123 #include "insn-format.def"
1126 #undef F0
1127 #undef F1
1128 #undef F2
1129 #undef F3
1130 #undef F4
1131 #undef F5
1132 #undef R
1133 #undef M
1134 #undef BD
1135 #undef BXD
1136 #undef BDL
1137 #undef BXDL
1138 #undef I
1139 #undef L
1141 /* Generally, we'll extract operands into this structures, operate upon
1142 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1143 of routines below for more details. */
1144 typedef struct {
1145 bool g_out, g_out2, g_in1, g_in2;
1146 TCGv_i64 out, out2, in1, in2;
1147 TCGv_i64 addr1;
1148 } DisasOps;
1150 /* Instructions can place constraints on their operands, raising specification
1151 exceptions if they are violated. To make this easy to automate, each "in1",
1152 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1153 of the following, or 0. To make this easy to document, we'll put the
1154 SPEC_<name> defines next to <name>. */
1156 #define SPEC_r1_even 1
1157 #define SPEC_r2_even 2
1158 #define SPEC_r3_even 4
1159 #define SPEC_r1_f128 8
1160 #define SPEC_r2_f128 16
1162 /* Return values from translate_one, indicating the state of the TB. */
1163 typedef enum {
1164 /* Continue the TB. */
1165 NO_EXIT,
1166 /* We have emitted one or more goto_tb. No fixup required. */
1167 EXIT_GOTO_TB,
1168 /* We are not using a goto_tb (for whatever reason), but have updated
1169 the PC (for whatever reason), so there's no need to do it again on
1170 exiting the TB. */
1171 EXIT_PC_UPDATED,
1172 /* We have updated the PC and CC values. */
1173 EXIT_PC_CC_UPDATED,
1174 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1175 updated the PC for the next instruction to be executed. */
1176 EXIT_PC_STALE,
1177 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1178 No following code will be executed. */
1179 EXIT_NORETURN,
1180 } ExitStatus;
1182 typedef enum DisasFacility {
1183 FAC_Z, /* zarch (default) */
1184 FAC_CASS, /* compare and swap and store */
1185 FAC_CASS2, /* compare and swap and store 2*/
1186 FAC_DFP, /* decimal floating point */
1187 FAC_DFPR, /* decimal floating point rounding */
1188 FAC_DO, /* distinct operands */
1189 FAC_EE, /* execute extensions */
1190 FAC_EI, /* extended immediate */
1191 FAC_FPE, /* floating point extension */
1192 FAC_FPSSH, /* floating point support sign handling */
1193 FAC_FPRGR, /* FPR-GR transfer */
1194 FAC_GIE, /* general instructions extension */
1195 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1196 FAC_HW, /* high-word */
1197 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1198 FAC_MIE, /* miscellaneous-instruction-extensions */
1199 FAC_LAT, /* load-and-trap */
1200 FAC_LOC, /* load/store on condition */
1201 FAC_LD, /* long displacement */
1202 FAC_PC, /* population count */
1203 FAC_SCF, /* store clock fast */
1204 FAC_SFLE, /* store facility list extended */
1205 FAC_ILA, /* interlocked access facility 1 */
1206 FAC_LPP, /* load-program-parameter */
1207 FAC_DAT_ENH, /* DAT-enhancement */
1208 } DisasFacility;
1210 struct DisasInsn {
1211 unsigned opc:16;
1212 DisasFormat fmt:8;
1213 DisasFacility fac:8;
1214 unsigned spec:8;
1216 const char *name;
1218 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1219 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1220 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1221 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1222 void (*help_cout)(DisasContext *, DisasOps *);
1223 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1225 uint64_t data;
1228 /* ====================================================================== */
1229 /* Miscellaneous helpers, used by several operations. */
1231 static void help_l2_shift(DisasContext *s, DisasFields *f,
1232 DisasOps *o, int mask)
1234 int b2 = get_field(f, b2);
1235 int d2 = get_field(f, d2);
1237 if (b2 == 0) {
1238 o->in2 = tcg_const_i64(d2 & mask);
1239 } else {
1240 o->in2 = get_address(s, 0, b2, d2);
1241 tcg_gen_andi_i64(o->in2, o->in2, mask);
1245 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1247 if (dest == s->next_pc) {
1248 per_branch(s, true);
1249 return NO_EXIT;
1251 if (use_goto_tb(s, dest)) {
1252 update_cc_op(s);
1253 per_breaking_event(s);
1254 tcg_gen_goto_tb(0);
1255 tcg_gen_movi_i64(psw_addr, dest);
1256 tcg_gen_exit_tb((uintptr_t)s->tb);
1257 return EXIT_GOTO_TB;
1258 } else {
1259 tcg_gen_movi_i64(psw_addr, dest);
1260 per_branch(s, false);
1261 return EXIT_PC_UPDATED;
/* Emit a conditional branch for comparison C.  The destination is
   either PC-relative (is_imm: s->pc + 2*imm) or the register/computed
   value CDEST.  Handles the degenerate cases (never/always taken,
   branch-to-next, bcr %r0) first, then picks the best exit strategy:
   goto_tb on both edges, goto_tb on fallthru only, or a movcond on
   psw_addr when goto_tb is unavailable.  Always consumes C via
   free_compare at egress. */
1265 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1266 bool is_imm, int imm, TCGv_i64 cdest)
1268 ExitStatus ret;
1269 uint64_t dest = s->pc + 2 * imm;
1270 TCGLabel *lab;
1272 /* Take care of the special cases first. */
1273 if (c->cond == TCG_COND_NEVER) {
1274 ret = NO_EXIT;
1275 goto egress;
1277 if (is_imm) {
1278 if (dest == s->next_pc) {
1279 /* Branch to next. */
1280 per_branch(s, true);
1281 ret = NO_EXIT;
1282 goto egress;
1284 if (c->cond == TCG_COND_ALWAYS) {
1285 ret = help_goto_direct(s, dest);
1286 goto egress;
1288 } else {
1289 if (TCGV_IS_UNUSED_I64(cdest)) {
1290 /* E.g. bcr %r0 -> no branch. */
1291 ret = NO_EXIT;
1292 goto egress;
1294 if (c->cond == TCG_COND_ALWAYS) {
1295 tcg_gen_mov_i64(psw_addr, cdest);
1296 per_branch(s, false);
1297 ret = EXIT_PC_UPDATED;
1298 goto egress;
/* Genuinely conditional branch from here on. */
1302 if (use_goto_tb(s, s->next_pc)) {
1303 if (is_imm && use_goto_tb(s, dest)) {
1304 /* Both exits can use goto_tb. */
1305 update_cc_op(s);
1307 lab = gen_new_label();
1308 if (c->is_64) {
1309 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1310 } else {
1311 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1314 /* Branch not taken. */
1315 tcg_gen_goto_tb(0);
1316 tcg_gen_movi_i64(psw_addr, s->next_pc);
1317 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1319 /* Branch taken. */
1320 gen_set_label(lab);
1321 per_breaking_event(s);
1322 tcg_gen_goto_tb(1);
1323 tcg_gen_movi_i64(psw_addr, dest);
1324 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1326 ret = EXIT_GOTO_TB;
1327 } else {
1328 /* Fallthru can use goto_tb, but taken branch cannot. */
1329 /* Store taken branch destination before the brcond. This
1330 avoids having to allocate a new local temp to hold it.
1331 We'll overwrite this in the not taken case anyway. */
1332 if (!is_imm) {
1333 tcg_gen_mov_i64(psw_addr, cdest);
1336 lab = gen_new_label();
1337 if (c->is_64) {
1338 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1339 } else {
1340 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1343 /* Branch not taken. */
1344 update_cc_op(s);
1345 tcg_gen_goto_tb(0);
1346 tcg_gen_movi_i64(psw_addr, s->next_pc);
1347 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1349 gen_set_label(lab);
1350 if (is_imm) {
1351 tcg_gen_movi_i64(psw_addr, dest);
1353 per_breaking_event(s);
1354 ret = EXIT_PC_UPDATED;
1356 } else {
1357 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1358 Most commonly we're single-stepping or some other condition that
1359 disables all use of goto_tb. Just update the PC and exit. */
1361 TCGv_i64 next = tcg_const_i64(s->next_pc);
1362 if (is_imm) {
1363 cdest = tcg_const_i64(dest);
1366 if (c->is_64) {
1367 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1368 cdest, next);
1369 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1370 } else {
1371 TCGv_i32 t0 = tcg_temp_new_i32();
1372 TCGv_i64 t1 = tcg_temp_new_i64();
1373 TCGv_i64 z = tcg_const_i64(0);
1374 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1375 tcg_gen_extu_i32_i64(t1, t0);
1376 tcg_temp_free_i32(t0);
1377 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1378 per_branch_cond(s, TCG_COND_NE, t1, z);
1379 tcg_temp_free_i64(t1);
1380 tcg_temp_free_i64(z);
1383 if (is_imm) {
1384 tcg_temp_free_i64(cdest);
1386 tcg_temp_free_i64(next);
1388 ret = EXIT_PC_UPDATED;
1391 egress:
1392 free_compare(c);
1393 return ret;
1396 /* ====================================================================== */
1397 /* The operations. These perform the bulk of the work for any insn,
1398 usually after the operands have been loaded and output initialized. */
/* Integer absolute value: out = |in2| via neg + movcond. */
1400 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1402 TCGv_i64 z, n;
1403 z = tcg_const_i64(0);
1404 n = tcg_temp_new_i64();
1405 tcg_gen_neg_i64(n, o->in2);
1406 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1407 tcg_temp_free_i64(n);
1408 tcg_temp_free_i64(z);
1409 return NO_EXIT;
/* Float absolute value (32-bit): clear the sign bit. */
1412 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1414 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1415 return NO_EXIT;
/* Float absolute value (64-bit): clear the sign bit. */
1418 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1420 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1421 return NO_EXIT;
/* Float absolute value (128-bit): sign bit is in the high doubleword;
   the low doubleword passes through unchanged. */
1424 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1426 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1427 tcg_gen_mov_i64(o->out2, o->in2);
1428 return NO_EXIT;
/* ADD: plain 64-bit addition. */
1431 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1433 tcg_gen_add_i64(o->out, o->in1, o->in2);
1434 return NO_EXIT;
/* ADD WITH CARRY: out = in1 + in2 + carry, where the incoming carry is
   extracted from the current CC state via disas_jcc. */
1437 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1439 DisasCompare cmp;
1440 TCGv_i64 carry;
1442 tcg_gen_add_i64(o->out, o->in1, o->in2);
1444 /* The carry flag is the msb of CC, therefore the branch mask that would
1445 create that comparison is 3. Feeding the generated comparison to
1446 setcond produces the carry flag that we desire. */
1447 disas_jcc(s, &cmp, 3);
1448 carry = tcg_temp_new_i64();
1449 if (cmp.is_64) {
1450 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1451 } else {
/* 32-bit comparison: widen the setcond result to 64 bits. */
1452 TCGv_i32 t = tcg_temp_new_i32();
1453 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1454 tcg_gen_extu_i32_i64(carry, t);
1455 tcg_temp_free_i32(t);
1457 free_compare(&cmp);
1459 tcg_gen_add_i64(o->out, o->out, carry);
1460 tcg_temp_free_i64(carry);
1461 return NO_EXIT;
/* BFP add, short (AEB): done in the helper. */
1464 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1466 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1467 return NO_EXIT;
/* BFP add, long (ADB). */
1470 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1472 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1473 return NO_EXIT;
/* BFP add, extended (AXB): 128-bit result returned via low128. */
1476 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1478 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1479 return_low128(o->out2);
1480 return NO_EXIT;
/* AND: plain 64-bit bitwise and. */
1483 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1485 tcg_gen_and_i64(o->out, o->in1, o->in2);
1486 return NO_EXIT;
/* AND IMMEDIATE on a sub-field of the register: insn->data encodes the
   field's shift (low byte) and bit width (high bits).  Bits outside the
   field are forced to 1 in the mask operand so they pass through, and
   CC is computed from only the bits actually manipulated. */
1489 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1491 int shift = s->insn->data & 0xff;
1492 int size = s->insn->data >> 8;
1493 uint64_t mask = ((1ull << size) - 1) << shift;
1495 assert(!o->g_in2);
1496 tcg_gen_shli_i64(o->in2, o->in2, shift);
1497 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1498 tcg_gen_and_i64(o->out, o->in1, o->in2);
1500 /* Produce the CC from only the bits manipulated. */
1501 tcg_gen_andi_i64(cc_dst, o->out, mask);
1502 set_cc_nz_u64(s, cc_dst);
1503 return NO_EXIT;
/* BRANCH AND SAVE: store the link information, then branch to in2
   unless the target register was 0 (no branch). */
1506 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1508 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1509 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1510 tcg_gen_mov_i64(psw_addr, o->in2);
1511 per_branch(s, false);
1512 return EXIT_PC_UPDATED;
1513 } else {
1514 return NO_EXIT;
/* BRANCH RELATIVE AND SAVE: link info, then a PC-relative branch. */
1518 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1520 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1521 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* BRANCH ON CONDITION, register or relative form.  Handles the special
   BCR 14/15,0 serialization cases with a full memory barrier. */
1524 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1526 int m1 = get_field(s->fields, m1);
1527 bool is_imm = have_field(s->fields, i2);
1528 int imm = is_imm ? get_field(s->fields, i2) : 0;
1529 DisasCompare c;
1531 /* BCR with R2 = 0 causes no branching */
1532 if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1533 if (m1 == 14) {
1534 /* Perform serialization */
1535 /* FIXME: check for fast-BCR-serialization facility */
1536 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1538 if (m1 == 15) {
1539 /* Perform serialization */
1540 /* FIXME: perform checkpoint-synchronisation */
1541 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1543 return NO_EXIT;
1546 disas_jcc(s, &c, m1);
1547 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of R1 and branch
   if the 32-bit result is nonzero. */
1550 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1552 int r1 = get_field(s->fields, r1);
1553 bool is_imm = have_field(s->fields, i2);
1554 int imm = is_imm ? get_field(s->fields, i2) : 0;
1555 DisasCompare c;
1556 TCGv_i64 t;
1558 c.cond = TCG_COND_NE;
1559 c.is_64 = false;
1560 c.g1 = false;
1561 c.g2 = false;
1563 t = tcg_temp_new_i64();
1564 tcg_gen_subi_i64(t, regs[r1], 1);
1565 store_reg32_i64(r1, t);
1566 c.u.s32.a = tcg_temp_new_i32();
1567 c.u.s32.b = tcg_const_i32(0);
1568 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1569 tcg_temp_free_i64(t);
1571 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON COUNT HIGH: same, but on the high 32 bits of R1; only the
   relative (immediate) form exists. */
1574 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1576 int r1 = get_field(s->fields, r1);
1577 int imm = get_field(s->fields, i2);
1578 DisasCompare c;
1579 TCGv_i64 t;
1581 c.cond = TCG_COND_NE;
1582 c.is_64 = false;
1583 c.g1 = false;
1584 c.g2 = false;
1586 t = tcg_temp_new_i64();
1587 tcg_gen_shri_i64(t, regs[r1], 32);
1588 tcg_gen_subi_i64(t, t, 1);
1589 store_reg32h_i64(r1, t);
1590 c.u.s32.a = tcg_temp_new_i32();
1591 c.u.s32.b = tcg_const_i32(0);
1592 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1593 tcg_temp_free_i64(t);
1595 return help_branch(s, &c, 1, imm, o->in2);
/* BRANCH ON COUNT (64-bit): decrement R1 in place; regs[r1] is a
   global, hence g1 = true. */
1598 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1600 int r1 = get_field(s->fields, r1);
1601 bool is_imm = have_field(s->fields, i2);
1602 int imm = is_imm ? get_field(s->fields, i2) : 0;
1603 DisasCompare c;
1605 c.cond = TCG_COND_NE;
1606 c.is_64 = true;
1607 c.g1 = true;
1608 c.g2 = false;
1610 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1611 c.u.s64.a = regs[r1];
1612 c.u.s64.b = tcg_const_i64(0);
1614 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON INDEX (32-bit, BXH/BXLE): R1 += R3, compare against the
   odd register of the R3 pair; insn->data selects <= vs >. */
1617 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1619 int r1 = get_field(s->fields, r1);
1620 int r3 = get_field(s->fields, r3);
1621 bool is_imm = have_field(s->fields, i2);
1622 int imm = is_imm ? get_field(s->fields, i2) : 0;
1623 DisasCompare c;
1624 TCGv_i64 t;
1626 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1627 c.is_64 = false;
1628 c.g1 = false;
1629 c.g2 = false;
1631 t = tcg_temp_new_i64();
1632 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1633 c.u.s32.a = tcg_temp_new_i32();
1634 c.u.s32.b = tcg_temp_new_i32();
1635 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1636 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1637 store_reg32_i64(r1, t);
1638 tcg_temp_free_i64(t);
1640 return help_branch(s, &c, is_imm, imm, o->in2);
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG).  If R1 aliases the comparand
   register (r1 == r3|1), snapshot its value before the add. */
1643 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1645 int r1 = get_field(s->fields, r1);
1646 int r3 = get_field(s->fields, r3);
1647 bool is_imm = have_field(s->fields, i2);
1648 int imm = is_imm ? get_field(s->fields, i2) : 0;
1649 DisasCompare c;
1651 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1652 c.is_64 = true;
1654 if (r1 == (r3 | 1)) {
1655 c.u.s64.b = load_reg(r3 | 1);
1656 c.g2 = false;
1657 } else {
1658 c.u.s64.b = regs[r3 | 1];
1659 c.g2 = true;
1662 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1663 c.u.s64.a = regs[r1];
1664 c.g1 = true;
1666 return help_branch(s, &c, is_imm, imm, o->in2);
/* COMPARE AND BRANCH family: build the comparison from m3 (unsigned
   variant selected by insn->data), branching relative (i4) or to a
   computed address (b4/d4). */
1669 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1671 int imm, m3 = get_field(s->fields, m3);
1672 bool is_imm;
1673 DisasCompare c;
1675 c.cond = ltgt_cond[m3];
1676 if (s->insn->data) {
1677 c.cond = tcg_unsigned_cond(c.cond);
1679 c.is_64 = c.g1 = c.g2 = true;
1680 c.u.s64.a = o->in1;
1681 c.u.s64.b = o->in2;
1683 is_imm = have_field(s->fields, i4);
1684 if (is_imm) {
1685 imm = get_field(s->fields, i4);
1686 } else {
1687 imm = 0;
1688 o->out = get_address(s, 0, get_field(s->fields, b4),
1689 get_field(s->fields, d4));
1692 return help_branch(s, &c, is_imm, imm, o->out);
/* BFP compare, short (CEB): helper sets CC directly. */
1695 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1697 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1698 set_cc_static(s);
1699 return NO_EXIT;
/* BFP compare, long (CDB). */
1702 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1704 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1705 set_cc_static(s);
1706 return NO_EXIT;
/* BFP compare, extended (CXB). */
1709 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1711 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1712 set_cc_static(s);
1713 return NO_EXIT;
/* CONVERT TO FIXED family (BFP -> signed/unsigned 32/64-bit integer).
   The m3 field carries the rounding mode for the helper; CC is then
   derived from the float source operand. */
1716 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1718 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1719 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1720 tcg_temp_free_i32(m3);
1721 gen_set_cc_nz_f32(s, o->in2);
1722 return NO_EXIT;
1725 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1727 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1728 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1729 tcg_temp_free_i32(m3);
1730 gen_set_cc_nz_f64(s, o->in2);
1731 return NO_EXIT;
1734 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1736 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1737 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1738 tcg_temp_free_i32(m3);
1739 gen_set_cc_nz_f128(s, o->in1, o->in2);
1740 return NO_EXIT;
1743 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1745 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1746 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1747 tcg_temp_free_i32(m3);
1748 gen_set_cc_nz_f32(s, o->in2);
1749 return NO_EXIT;
1752 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1754 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1755 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1756 tcg_temp_free_i32(m3);
1757 gen_set_cc_nz_f64(s, o->in2);
1758 return NO_EXIT;
1761 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1763 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1764 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1765 tcg_temp_free_i32(m3);
1766 gen_set_cc_nz_f128(s, o->in1, o->in2);
1767 return NO_EXIT;
/* CONVERT TO LOGICAL variants (unsigned targets). */
1770 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1772 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1773 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1774 tcg_temp_free_i32(m3);
1775 gen_set_cc_nz_f32(s, o->in2);
1776 return NO_EXIT;
1779 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1781 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1782 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1783 tcg_temp_free_i32(m3);
1784 gen_set_cc_nz_f64(s, o->in2);
1785 return NO_EXIT;
1788 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1790 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1791 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1792 tcg_temp_free_i32(m3);
1793 gen_set_cc_nz_f128(s, o->in1, o->in2);
1794 return NO_EXIT;
1797 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1799 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1800 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1801 tcg_temp_free_i32(m3);
1802 gen_set_cc_nz_f32(s, o->in2);
1803 return NO_EXIT;
1806 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1808 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1809 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1810 tcg_temp_free_i32(m3);
1811 gen_set_cc_nz_f64(s, o->in2);
1812 return NO_EXIT;
1815 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1817 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1818 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1819 tcg_temp_free_i32(m3);
1820 gen_set_cc_nz_f128(s, o->in1, o->in2);
1821 return NO_EXIT;
/* CONVERT FROM FIXED / FROM LOGICAL (integer -> BFP): no CC change. */
1824 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1826 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1827 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1828 tcg_temp_free_i32(m3);
1829 return NO_EXIT;
1832 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1834 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1835 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1836 tcg_temp_free_i32(m3);
1837 return NO_EXIT;
1840 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1842 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1843 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1844 tcg_temp_free_i32(m3);
1845 return_low128(o->out2);
1846 return NO_EXIT;
1849 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1851 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1852 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1853 tcg_temp_free_i32(m3);
1854 return NO_EXIT;
1857 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1859 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1860 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1861 tcg_temp_free_i32(m3);
1862 return NO_EXIT;
1865 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1867 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1868 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1869 tcg_temp_free_i32(m3);
1870 return_low128(o->out2);
1871 return NO_EXIT;
/* CHECKSUM (CKSM): the helper returns how many bytes it consumed; the
   second operand pair R2/R2+1 (address/length) is advanced by that
   amount afterwards. */
1874 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1876 int r2 = get_field(s->fields, r2);
1877 TCGv_i64 len = tcg_temp_new_i64();
1879 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1880 set_cc_static(s);
1881 return_low128(o->out);
1883 tcg_gen_add_i64(regs[r2], regs[r2], len);
1884 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1885 tcg_temp_free_i64(len);
1887 return NO_EXIT;
/* COMPARE LOGICAL (CLC): for power-of-two operand lengths up to 8 do
   the two loads inline and compute CC via LTUGTU; any other length
   falls back to the byte-wise helper.  Note l1 is the length minus 1. */
1890 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1892 int l = get_field(s->fields, l1);
1893 TCGv_i32 vl;
1895 switch (l + 1) {
1896 case 1:
1897 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1898 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1899 break;
1900 case 2:
1901 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1902 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1903 break;
1904 case 4:
1905 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1906 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1907 break;
1908 case 8:
1909 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1910 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1911 break;
1912 default:
1913 vl = tcg_const_i32(l);
1914 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1915 tcg_temp_free_i32(vl);
1916 set_cc_static(s);
1917 return NO_EXIT;
1919 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1920 return NO_EXIT;
/* COMPARE LOGICAL LONG EXTENDED (CLCLE): operands are the even/odd
   register pairs starting at r1 and r3; the architecture requires both
   register numbers to be even, so raise a specification exception
   otherwise.  The actual comparison is done in the helper, which sets
   CC. */
1923 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1925 int r1 = get_field(s->fields, r1);
1926 int r3 = get_field(s->fields, r3);
1927 TCGv_i32 t1, t3;
1929 /* r1 and r3 must be even. */
1930 if (r1 & 1 || r3 & 1) {
1931 gen_program_exception(s, PGM_SPECIFICATION);
1932 return EXIT_NORETURN;
1935 t1 = tcg_const_i32(r1);
1936 t3 = tcg_const_i32(r3);
1937 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1938 tcg_temp_free_i32(t1);
1939 tcg_temp_free_i32(t3);
1940 set_cc_static(s);
1941 return NO_EXIT;
/* COMPARE LOGICAL CHARACTERS UNDER MASK (CLM): compare the bytes of
   in1 selected by the m3 mask against successive storage bytes. */
1944 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1946 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1947 TCGv_i32 t1 = tcg_temp_new_i32();
1948 tcg_gen_extrl_i64_i32(t1, o->in1);
1949 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1950 set_cc_static(s);
1951 tcg_temp_free_i32(t1);
1952 tcg_temp_free_i32(m3);
1953 return NO_EXIT;
/* COMPARE LOGICAL STRING (CLST): terminated by the byte in regs[0];
   updated operand addresses come back via in1 and low128. */
1956 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1958 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1959 set_cc_static(s);
1960 return_low128(o->in2);
1961 return NO_EXIT;
/* COPY SIGN (CPSDR): out = sign bit of in1 with magnitude of in2. */
1964 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1966 TCGv_i64 t = tcg_temp_new_i64();
1967 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1968 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1969 tcg_gen_or_i64(o->out, o->out, t);
1970 tcg_temp_free_i64(t);
1971 return NO_EXIT;
/* COMPARE AND SWAP (CS/CSG/CSY): atomic cmpxchg at b2+d2; the CC is
   the NE result of comparing the fetched value with the expected one. */
1974 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1976 int d2 = get_field(s->fields, d2);
1977 int b2 = get_field(s->fields, b2);
1978 TCGv_i64 addr, cc;
1980 /* Note that in1 = R3 (new value) and
1981 in2 = (zero-extended) R1 (expected value). */
1983 addr = get_address(s, 0, b2, d2);
1984 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
1985 get_mem_index(s), s->insn->data | MO_ALIGN);
1986 tcg_temp_free_i64(addr);
1988 /* Are the memory and expected values (un)equal? Note that this setcond
1989 produces the output CC value, thus the NE sense of the test. */
1990 cc = tcg_temp_new_i64();
1991 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1992 tcg_gen_extrl_i64_i32(cc_op, cc);
1993 tcg_temp_free_i64(cc);
1994 set_cc_static(s);
1996 return NO_EXIT;
/* COMPARE DOUBLE AND SWAP (CDSG): 128-bit variant done entirely in the
   helper, operating on the R1/R1+1 and R3/R3+1 register pairs. */
1999 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
2001 int r1 = get_field(s->fields, r1);
2002 int r3 = get_field(s->fields, r3);
2003 int d2 = get_field(s->fields, d2);
2004 int b2 = get_field(s->fields, b2);
2005 TCGv_i64 addr;
2006 TCGv_i32 t_r1, t_r3;
2008 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
2009 addr = get_address(s, 0, b2, d2);
2010 t_r1 = tcg_const_i32(r1);
2011 t_r3 = tcg_const_i32(r3);
2012 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2013 tcg_temp_free_i64(addr);
2014 tcg_temp_free_i32(t_r1);
2015 tcg_temp_free_i32(t_r3);
2017 set_cc_static(s);
2018 return NO_EXIT;
2021 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (CSP/CSPG), privileged: cmpxchg at the
   size-aligned address from in2; on a successful compare with the LSB
   of R2 set, purge the TLB on all cpus. */
2022 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2024 TCGMemOp mop = s->insn->data;
2025 TCGv_i64 addr, old, cc;
2026 TCGLabel *lab = gen_new_label();
2028 /* Note that in1 = R1 (zero-extended expected value),
2029 out = R1 (original reg), out2 = R1+1 (new value). */
2031 check_privileged(s);
2032 addr = tcg_temp_new_i64();
2033 old = tcg_temp_new_i64();
2034 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2035 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2036 get_mem_index(s), mop | MO_ALIGN);
2037 tcg_temp_free_i64(addr);
2039 /* Are the memory and expected values (un)equal? */
2040 cc = tcg_temp_new_i64();
2041 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2042 tcg_gen_extrl_i64_i32(cc_op, cc);
2044 /* Write back the output now, so that it happens before the
2045 following branch, so that we don't need local temps. */
2046 if ((mop & MO_SIZE) == MO_32) {
2047 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2048 } else {
2049 tcg_gen_mov_i64(o->out, old);
2051 tcg_temp_free_i64(old);
2053 /* If the comparison was equal, and the LSB of R2 was set,
2054 then we need to flush the TLB (for all cpus). */
2055 tcg_gen_xori_i64(cc, cc, 1);
2056 tcg_gen_and_i64(cc, cc, o->in2);
2057 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2058 tcg_temp_free_i64(cc);
2060 gen_helper_purge(cpu_env);
2061 gen_set_label(lab);
2063 return NO_EXIT;
2065 #endif
/* CONVERT TO DECIMAL (CVD): convert the low 32 bits of in1 to packed
   decimal in the helper and store the 8-byte result at in2. */
2067 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2069 TCGv_i64 t1 = tcg_temp_new_i64();
2070 TCGv_i32 t2 = tcg_temp_new_i32();
2071 tcg_gen_extrl_i64_i32(t2, o->in1);
2072 gen_helper_cvd(t1, t2);
2073 tcg_temp_free_i32(t2);
2074 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2075 tcg_temp_free_i64(t1);
2076 return NO_EXIT;
/* COMPARE AND TRAP family: trap when the m3 condition holds, i.e.
   branch past the trap on the inverted condition; insn->data selects
   the unsigned comparison variants. */
2079 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2081 int m3 = get_field(s->fields, m3);
2082 TCGLabel *lab = gen_new_label();
2083 TCGCond c;
2085 c = tcg_invert_cond(ltgt_cond[m3]);
2086 if (s->insn->data) {
2087 c = tcg_unsigned_cond(c);
2089 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2091 /* Trap. */
2092 gen_trap(s);
2094 gen_set_label(lab);
2095 return NO_EXIT;
2098 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE, privileged: sync psw_addr and CC, then hand the function
   code and register numbers to the helper. */
2099 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2101 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2102 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2103 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2105 check_privileged(s);
2106 update_psw_addr(s);
2107 gen_op_calc_cc(s);
2109 gen_helper_diag(cpu_env, r1, r3, func_code);
2111 tcg_temp_free_i32(func_code);
2112 tcg_temp_free_i32(r3);
2113 tcg_temp_free_i32(r1);
2114 return NO_EXIT;
2116 #endif
/* Integer divide family: the helper produces remainder and quotient
   (quotient returned via low128 into o->out). */
2118 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2120 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2121 return_low128(o->out);
2122 return NO_EXIT;
2125 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2127 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2128 return_low128(o->out);
2129 return NO_EXIT;
2132 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2134 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2135 return_low128(o->out);
2136 return NO_EXIT;
/* 128/64 unsigned divide: dividend is the out:out2 pair. */
2139 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2141 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2142 return_low128(o->out);
2143 return NO_EXIT;
/* BFP divide, short/long/extended. */
2146 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2148 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2149 return NO_EXIT;
2152 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2154 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2155 return NO_EXIT;
2158 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2160 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2161 return_low128(o->out2);
2162 return NO_EXIT;
/* EXTRACT ACCESS REGISTER (EAR): read access register r2. */
2165 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2167 int r2 = get_field(s->fields, r2);
2168 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2169 return NO_EXIT;
/* EXTRACT CPU ATTRIBUTE (ECAG): we model no cache topology. */
2172 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2174 /* No cache information provided. */
2175 tcg_gen_movi_i64(o->out, -1);
2176 return NO_EXIT;
/* EXTRACT FPC (EFPC). */
2179 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2181 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2182 return NO_EXIT;
/* EXTRACT PSW (EPSW): high PSW word into R1, low word into R2. */
2185 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2187 int r1 = get_field(s->fields, r1);
2188 int r2 = get_field(s->fields, r2);
2189 TCGv_i64 t = tcg_temp_new_i64();
2191 /* Note the "subsequently" in the PoO, which implies a defined result
2192 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2193 tcg_gen_shri_i64(t, psw_mask, 32);
2194 store_reg32_i64(r1, t);
2195 if (r2 != 0) {
2196 store_reg32_i64(r2, psw_mask);
2199 tcg_temp_free_i64(t);
2200 return NO_EXIT;
/* EXECUTE: run the target instruction with its second byte OR'ed with
   bits of R1 (or 0 if r1 == 0).  Nested EXECUTE (s->ex_value already
   set) raises a program exception.  The helper stages the modified
   instruction; we then exit with PC and CC already up to date. */
2203 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2205 int r1 = get_field(s->fields, r1);
2206 TCGv_i32 ilen;
2207 TCGv_i64 v1;
2209 /* Nested EXECUTE is not allowed. */
2210 if (unlikely(s->ex_value)) {
2211 gen_program_exception(s, PGM_EXECUTE);
2212 return EXIT_NORETURN;
2215 update_psw_addr(s);
2216 update_cc_op(s);
2218 if (r1 == 0) {
2219 v1 = tcg_const_i64(0);
2220 } else {
2221 v1 = regs[r1];
2224 ilen = tcg_const_i32(s->ilen);
2225 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2226 tcg_temp_free_i32(ilen);
2228 if (r1 == 0) {
2229 tcg_temp_free_i64(v1);
2232 return EXIT_PC_CC_UPDATED;
/* LOAD FP INTEGER (FIEBR/FIDBR/FIXBR): round to integer-valued float
   with the rounding mode from m3, via helper. */
2235 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2237 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2238 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2239 tcg_temp_free_i32(m3);
2240 return NO_EXIT;
2243 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2245 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2246 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2247 tcg_temp_free_i32(m3);
2248 return NO_EXIT;
2251 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2253 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2254 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2255 return_low128(o->out2);
2256 tcg_temp_free_i32(m3);
2257 return NO_EXIT;
/* FIND LEFTMOST ONE (FLOGR): R1 = clz(in), R1+1 = input with the
   found bit cleared. */
2260 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2262 /* We'll use the original input for cc computation, since we get to
2263 compare that against 0, which ought to be better than comparing
2264 the real output against 64. It also lets cc_dst be a convenient
2265 temporary during our computation. */
2266 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2)_
2268 /* R1 = IN ? CLZ(IN) : 64. */
2269 tcg_gen_clzi_i64(o->out, o->in2, 64);
2271 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2272 value by 64, which is undefined. But since the shift is 64 iff the
2273 input is zero, we still get the correct result after and'ing. */
2274 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2275 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2276 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2277 return NO_EXIT;
/* INSERT CHARACTERS UNDER MASK (ICM/ICMH): contiguous mask patterns
   collapse to a single wider load + deposit; sparse masks fall back to
   byte loads.  insn->data is the base bit position of the register
   half being targeted; CCM accumulates the mask of inserted bits for
   the CC computation. */
2280 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2282 int m3 = get_field(s->fields, m3);
2283 int pos, len, base = s->insn->data;
2284 TCGv_i64 tmp = tcg_temp_new_i64();
2285 uint64_t ccm;
2287 switch (m3) {
2288 case 0xf:
2289 /* Effectively a 32-bit load. */
2290 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2291 len = 32;
2292 goto one_insert;
2294 case 0xc:
2295 case 0x6:
2296 case 0x3:
2297 /* Effectively a 16-bit load. */
2298 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2299 len = 16;
2300 goto one_insert;
2302 case 0x8:
2303 case 0x4:
2304 case 0x2:
2305 case 0x1:
2306 /* Effectively an 8-bit load. */
2307 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2308 len = 8;
2309 goto one_insert;
2311 one_insert:
2312 pos = base + ctz32(m3) * 8;
2313 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2314 ccm = ((1ull << len) - 1) << pos;
2315 break;
2317 default:
2318 /* This is going to be a sequence of loads and inserts. */
2319 pos = base + 32 - 8;
2320 ccm = 0;
2321 while (m3) {
2322 if (m3 & 0x8) {
2323 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2324 tcg_gen_addi_i64(o->in2, o->in2, 1);
2325 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2326 ccm |= 0xff << pos;
2328 m3 = (m3 << 1) & 0xf;
2329 pos -= 8;
2331 break;
2334 tcg_gen_movi_i64(tmp, ccm);
2335 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2336 tcg_temp_free_i64(tmp);
2337 return NO_EXIT;
/* Insert-immediate family: deposit in2 into the field of in1 whose
   shift/size are packed into insn->data (low byte / high bits). */
2340 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2342 int shift = s->insn->data & 0xff;
2343 int size = s->insn->data >> 8;
2344 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2345 return NO_EXIT;
/* INSERT PROGRAM MASK (IPM): bits 32-39 of R1 become CC (2 bits) and
   the program mask (4 bits) from the PSW. */
2348 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2350 TCGv_i64 t1;
2352 gen_op_calc_cc(s);
2353 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
/* Extract the program mask from the PSW and insert at bits 24-27. */
2355 t1 = tcg_temp_new_i64();
2356 tcg_gen_shli_i64(t1, psw_mask, 20);
2357 tcg_gen_shri_i64(t1, t1, 36);
2358 tcg_gen_or_i64(o->out, o->out, t1);
/* Insert the computed CC at bits 28-29. */
2360 tcg_gen_extu_i32_i64(t1, cc_op);
2361 tcg_gen_shli_i64(t1, t1, 28);
2362 tcg_gen_or_i64(o->out, o->out, t1);
2363 tcg_temp_free_i64(t1);
2364 return NO_EXIT;
2367 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (IPTE), privileged. */
2368 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2370 TCGv_i32 m4;
2372 check_privileged(s);
2373 m4 = tcg_const_i32(get_field(s->fields, m4));
2374 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2375 tcg_temp_free_i32(m4);
2376 return NO_EXIT;
/* INSERT STORAGE KEY EXTENDED (ISKE), privileged. */
2379 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2381 check_privileged(s);
2382 gen_helper_iske(o->out, cpu_env, o->in2);
2383 return NO_EXIT;
2385 #endif
/* BFP compare-and-signal (KEB/KDB/KXB): like compare but signaling on
   quiet NaNs; helper sets CC. */
2387 static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2389 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2390 set_cc_static(s);
2391 return NO_EXIT;
2394 static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
2396 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2397 set_cc_static(s);
2398 return NO_EXIT;
2401 static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
2403 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2404 set_cc_static(s);
2405 return NO_EXIT;
/* LOAD AND ADD (LAA/LAAG): atomic fetch-add; the fetched original
   value lands in in2 and the sum is recomputed into out for CC. */
2408 static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2410 /* The real output is indeed the original value in memory;
2411 recompute the addition for the computation of CC. */
2412 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2413 s->insn->data | MO_ALIGN);
2414 /* However, we need to recompute the addition for setting CC. */
2415 tcg_gen_add_i64(o->out, o->in1, o->in2);
2416 return NO_EXIT;
/* LOAD AND AND (LAN/LANG): atomic fetch-and, same CC recompute. */
2419 static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2421 /* The real output is indeed the original value in memory;
2422 recompute the addition for the computation of CC. */
2423 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2424 s->insn->data | MO_ALIGN);
2425 /* However, we need to recompute the operation for setting CC. */
2426 tcg_gen_and_i64(o->out, o->in1, o->in2);
2427 return NO_EXIT;
/* LOAD AND OR (LAO/LAOG): atomic fetch-or. */
2430 static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2432 /* The real output is indeed the original value in memory;
2433 recompute the addition for the computation of CC. */
2434 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2435 s->insn->data | MO_ALIGN);
2436 /* However, we need to recompute the operation for setting CC. */
2437 tcg_gen_or_i64(o->out, o->in1, o->in2);
2438 return NO_EXIT;
/* LOAD AND EXCLUSIVE OR (LAX/LAXG): atomic fetch-xor. */
2441 static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2443 /* The real output is indeed the original value in memory;
2444 recompute the addition for the computation of CC. */
2445 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2446 s->insn->data | MO_ALIGN);
2447 /* However, we need to recompute the operation for setting CC. */
2448 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2449 return NO_EXIT;
/* LOAD LENGTHENED: short BFP -> long BFP.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: long BFP -> short BFP.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: extended BFP (two halves) -> long BFP.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED: extended BFP (two halves) -> short BFP.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED: long BFP -> extended BFP (128-bit result).  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED: short BFP -> extended BFP (128-bit result).  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths; in2 holds the address.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* LOAD AND TRAP (32-bit): store the loaded value, then trap if it is zero.  */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD AND TRAP (64-bit): load from memory, then trap if zero.  */
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD HIGH AND TRAP: store into the high half of r1, trap if zero.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD LOGICAL AND TRAP (32->64): zero-extending load, trap if zero.  */
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, trap if zero.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD ON CONDITION: out = (cond ? in2 : in1), where the condition comes
   from the m3 mask decoded against the current CC.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        /* Compare operands already 64-bit; select directly.  */
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize it as a 0/1 value, widen,
           then select against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; load control registers r1..r3.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit): privileged.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD REAL ADDRESS: privileged; helper sets cc.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PROGRAM PARAMETER: privileged; store in2 into env->pp.  */
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}

/* LOAD PSW (short, 64-bit format): privileged; never returns to the TB.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (128-bit format): privileged; never returns.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
#endif
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory at in2.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2711 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2713 int r1 = get_field(s->fields, r1);
2714 int r3 = get_field(s->fields, r3);
2715 TCGv_i64 t1, t2;
2717 /* Only one register to read. */
2718 t1 = tcg_temp_new_i64();
2719 if (unlikely(r1 == r3)) {
2720 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2721 store_reg32_i64(r1, t1);
2722 tcg_temp_free(t1);
2723 return NO_EXIT;
2726 /* First load the values of the first and last registers to trigger
2727 possible page faults. */
2728 t2 = tcg_temp_new_i64();
2729 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2730 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2731 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2732 store_reg32_i64(r1, t1);
2733 store_reg32_i64(r3, t2);
2735 /* Only two registers to read. */
2736 if (((r1 + 1) & 15) == r3) {
2737 tcg_temp_free(t2);
2738 tcg_temp_free(t1);
2739 return NO_EXIT;
2742 /* Then load the remaining registers. Page fault can't occur. */
2743 r3 = (r3 - 1) & 15;
2744 tcg_gen_movi_i64(t2, 4);
2745 while (r1 != r3) {
2746 r1 = (r1 + 1) & 15;
2747 tcg_gen_add_i64(o->in2, o->in2, t2);
2748 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2749 store_reg32_i64(r1, t1);
2751 tcg_temp_free(t2);
2752 tcg_temp_free(t1);
2754 return NO_EXIT;
2757 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2759 int r1 = get_field(s->fields, r1);
2760 int r3 = get_field(s->fields, r3);
2761 TCGv_i64 t1, t2;
2763 /* Only one register to read. */
2764 t1 = tcg_temp_new_i64();
2765 if (unlikely(r1 == r3)) {
2766 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2767 store_reg32h_i64(r1, t1);
2768 tcg_temp_free(t1);
2769 return NO_EXIT;
2772 /* First load the values of the first and last registers to trigger
2773 possible page faults. */
2774 t2 = tcg_temp_new_i64();
2775 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2776 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2777 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2778 store_reg32h_i64(r1, t1);
2779 store_reg32h_i64(r3, t2);
2781 /* Only two registers to read. */
2782 if (((r1 + 1) & 15) == r3) {
2783 tcg_temp_free(t2);
2784 tcg_temp_free(t1);
2785 return NO_EXIT;
2788 /* Then load the remaining registers. Page fault can't occur. */
2789 r3 = (r3 - 1) & 15;
2790 tcg_gen_movi_i64(t2, 4);
2791 while (r1 != r3) {
2792 r1 = (r1 + 1) & 15;
2793 tcg_gen_add_i64(o->in2, o->in2, t2);
2794 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2795 store_reg32h_i64(r1, t1);
2797 tcg_temp_free(t2);
2798 tcg_temp_free(t1);
2800 return NO_EXIT;
2803 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2805 int r1 = get_field(s->fields, r1);
2806 int r3 = get_field(s->fields, r3);
2807 TCGv_i64 t1, t2;
2809 /* Only one register to read. */
2810 if (unlikely(r1 == r3)) {
2811 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2812 return NO_EXIT;
2815 /* First load the values of the first and last registers to trigger
2816 possible page faults. */
2817 t1 = tcg_temp_new_i64();
2818 t2 = tcg_temp_new_i64();
2819 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2820 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2821 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2822 tcg_gen_mov_i64(regs[r1], t1);
2823 tcg_temp_free(t2);
2825 /* Only two registers to read. */
2826 if (((r1 + 1) & 15) == r3) {
2827 tcg_temp_free(t1);
2828 return NO_EXIT;
2831 /* Then load the remaining registers. Page fault can't occur. */
2832 r3 = (r3 - 1) & 15;
2833 tcg_gen_movi_i64(t1, 8);
2834 while (r1 != r3) {
2835 r1 = (r1 + 1) & 15;
2836 tcg_gen_add_i64(o->in2, o->in2, t1);
2837 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2839 tcg_temp_free(t1);
2841 return NO_EXIT;
/* LOAD PAIR DISJOINT: load two operands "interlocked".  We can only honor
   the interlock guarantee serially, so under MTTCG we stop the world.  */
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (parallel_cpus) {
        potential_page_fault(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit): privileged.  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* Generic move: steal in2 as the output, transferring "global" ownership
   so the writeback machinery does not double-free it.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}

/* MOVE TO PRIMARY/SECONDARY/etc (MVCOS family helper move): besides the
   move, set access register 1 according to the current address-space
   control in the PSW.  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    /* Choose the AR1 value from the ASC mode; all four modes are covered,
       so no default case is needed.  */
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            /* Copy the access register selected by the base field.  */
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}

/* 128-bit move: steal both input halves as the output pair.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE (MVC): byte copy of l1+1 bytes from in2 to addr1.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE INVERSE: copy with descending source addresses.  */
static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE LONG: register pairs r1/r2 describe the operands; helper sets cc.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: in2 carries the a2 operand; helper sets cc.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged; helper sets cc.
   NOTE(review): for this SS format the R1 register number is encoded in
   the l1 field position, hence get_field(..., l1) — confirm against the
   insn-data tables before changing.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: privileged; same r1-in-l1 encoding as op_mvcp.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
/* MOVE NUMERICS: copy only the low nibbles of each byte.  */
static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE WITH OFFSET: packed-decimal shifted copy.  */
static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE PAGE: helper consumes r0 for flags and sets cc.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: r0 holds the terminator byte; helper returns the updated
   first-operand address and the second address via the low-128 mechanism.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* MOVE ZONES: copy only the high nibbles of each byte.  */
static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MULTIPLY (single-width result).  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY LOGICAL (128-bit result): high half in out, low in out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP).  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP operands, long BFP result).  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP).  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP): 128-bit operands and result.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP operands, extended BFP result).  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP).  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP).  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
3114 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3116 TCGv_i64 z, n;
3117 z = tcg_const_i64(0);
3118 n = tcg_temp_new_i64();
3119 tcg_gen_neg_i64(n, o->in2);
3120 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3121 tcg_temp_free_i64(n);
3122 tcg_temp_free_i64(z);
3123 return NO_EXIT;
/* LOAD NEGATIVE (short BFP): force the sign bit on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP).  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign bit lives in the high half.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* AND (character): storage-to-storage AND; helper sets cc.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* LOAD COMPLEMENT (integer).  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP).  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): sign bit is in the high half.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* OR (character): storage-to-storage OR; helper sets cc.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register).  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE: insn->data packs the field size (high byte) and the bit
   shift (low byte) of the immediate within the register.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* PACK: convert zoned decimal to packed decimal.  */
static ExitStatus op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* POPULATION COUNT: per-byte bit counts via helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
#endif
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate in2 by
   i5, then insert bits i3..i4 of the result into out, optionally zeroing
   the bits outside the selection (i4 bit 0x80).  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
        tcg_gen_extract_i64(o->out, o->in2, rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both halves and merge.  */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate in2
   by i5, combine bits i3..i4 into out with the op selected by op2, and
   set CC from the affected bits.  i3 bit 0x80 makes it test-only.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        /* Bits outside the mask must not clear anything: force them on.  */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Byte-swap primitives used by LOAD/STORE REVERSED.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
3397 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3399 TCGv_i32 t1 = tcg_temp_new_i32();
3400 TCGv_i32 t2 = tcg_temp_new_i32();
3401 TCGv_i32 to = tcg_temp_new_i32();
3402 tcg_gen_extrl_i64_i32(t1, o->in1);
3403 tcg_gen_extrl_i64_i32(t2, o->in2);
3404 tcg_gen_rotl_i32(to, t1, t2);
3405 tcg_gen_extu_i32_i64(o->out, to);
3406 tcg_temp_free_i32(t1);
3407 tcg_temp_free_i32(t2);
3408 tcg_temp_free_i32(to);
3409 return NO_EXIT;
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets cc.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL (FAST): privileged.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
#endif
/* SET ADDRESSING MODE: insn->data selects 24/31/64-bit mode; update the
   PSW addressing-mode bits and end the TB.  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;        /* 24-bit addresses */
        break;
    case 1:
        mask = 0x7fffffff;      /* 31-bit addresses */
        break;
    default:
        mask = -1;              /* 64-bit addresses */
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
/* SET ACCESS REGISTER: store in2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP): 128-bit operands and result.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SERVICE CALL: privileged; helper sets cc.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged; helper sets cc.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* STORE ON CONDITION: store r1 (32 or 64 bits per insn->data) only when
   the m3 condition is met.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic): insn->data gives the sign-bit position
   (31 or 63), which also selects the CC computation.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}

/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: load the floating-point control register from in2.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: may raise a simulated IEEE exception.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): insert the rounding-mode field
   from the effective address into the FPC, then reinstall the FPC so
   fpu_status picks up the new mode.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* Each variant targets a different bit-field of the FPC.  */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; key is bits 56-59 of the address.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED: privileged.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: privileged; replace PSW bits 0-7.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}

/* STORE CPU ADDRESS: privileged.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* STORE CLOCK.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: the 64-bit clock value is placed into a 104-bit
   field; bits 64-103 must be non-zero, so set the lsb.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* SET CLOCK COMPARATOR: privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR: privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit): privileged; store control registers r1..r3.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit): privileged.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3755 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3757 TCGv_i64 t1 = tcg_temp_new_i64();
3759 check_privileged(s);
3760 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3761 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3762 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3763 tcg_temp_free_i64(t1);
3765 return NO_EXIT;
3768 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3770 check_privileged(s);
3771 gen_helper_spt(cpu_env, o->in2);
3772 return NO_EXIT;
3775 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3777 check_privileged(s);
3778 gen_helper_stfl(cpu_env);
3779 return NO_EXIT;
3782 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3784 check_privileged(s);
3785 gen_helper_stpt(o->out, cpu_env);
3786 return NO_EXIT;
3789 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3791 check_privileged(s);
3792 potential_page_fault(s);
3793 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3794 set_cc_static(s);
3795 return NO_EXIT;
3798 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3800 check_privileged(s);
3801 gen_helper_spx(cpu_env, o->in2);
3802 return NO_EXIT;
3805 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3807 check_privileged(s);
3808 potential_page_fault(s);
3809 gen_helper_xsch(cpu_env, regs[1]);
3810 set_cc_static(s);
3811 return NO_EXIT;
3814 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3816 check_privileged(s);
3817 potential_page_fault(s);
3818 gen_helper_csch(cpu_env, regs[1]);
3819 set_cc_static(s);
3820 return NO_EXIT;
3823 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3825 check_privileged(s);
3826 potential_page_fault(s);
3827 gen_helper_hsch(cpu_env, regs[1]);
3828 set_cc_static(s);
3829 return NO_EXIT;
3832 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3834 check_privileged(s);
3835 potential_page_fault(s);
3836 gen_helper_msch(cpu_env, regs[1], o->in2);
3837 set_cc_static(s);
3838 return NO_EXIT;
3841 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3843 check_privileged(s);
3844 potential_page_fault(s);
3845 gen_helper_rchp(cpu_env, regs[1]);
3846 set_cc_static(s);
3847 return NO_EXIT;
3850 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3852 check_privileged(s);
3853 potential_page_fault(s);
3854 gen_helper_rsch(cpu_env, regs[1]);
3855 set_cc_static(s);
3856 return NO_EXIT;
3859 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3861 check_privileged(s);
3862 potential_page_fault(s);
3863 gen_helper_ssch(cpu_env, regs[1], o->in2);
3864 set_cc_static(s);
3865 return NO_EXIT;
3868 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3870 check_privileged(s);
3871 potential_page_fault(s);
3872 gen_helper_stsch(cpu_env, regs[1], o->in2);
3873 set_cc_static(s);
3874 return NO_EXIT;
3877 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3879 check_privileged(s);
3880 potential_page_fault(s);
3881 gen_helper_tsch(cpu_env, regs[1], o->in2);
3882 set_cc_static(s);
3883 return NO_EXIT;
3886 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3888 check_privileged(s);
3889 potential_page_fault(s);
3890 gen_helper_chsc(cpu_env, o->in2);
3891 set_cc_static(s);
3892 return NO_EXIT;
3895 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3897 check_privileged(s);
3898 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3899 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3900 return NO_EXIT;
3903 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3905 uint64_t i2 = get_field(s->fields, i2);
3906 TCGv_i64 t;
3908 check_privileged(s);
3910 /* It is important to do what the instruction name says: STORE THEN.
3911 If we let the output hook perform the store then if we fault and
3912 restart, we'll have the wrong SYSTEM MASK in place. */
3913 t = tcg_temp_new_i64();
3914 tcg_gen_shri_i64(t, psw_mask, 56);
3915 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3916 tcg_temp_free_i64(t);
3918 if (s->fields->op == 0xac) {
3919 tcg_gen_andi_i64(psw_mask, psw_mask,
3920 (i2 << 56) | 0x00ffffffffffffffull);
3921 } else {
3922 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3924 return NO_EXIT;
3927 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3929 check_privileged(s);
3930 potential_page_fault(s);
3931 gen_helper_stura(cpu_env, o->in2, o->in1);
3932 return NO_EXIT;
3935 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3937 check_privileged(s);
3938 potential_page_fault(s);
3939 gen_helper_sturg(cpu_env, o->in2, o->in1);
3940 return NO_EXIT;
3942 #endif
3944 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
3946 potential_page_fault(s);
3947 gen_helper_stfle(cc_op, cpu_env, o->in2);
3948 set_cc_static(s);
3949 return NO_EXIT;
3952 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3954 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3955 return NO_EXIT;
3958 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3960 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3961 return NO_EXIT;
3964 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3966 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3967 return NO_EXIT;
3970 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3972 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3973 return NO_EXIT;
3976 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3978 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3979 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3980 gen_helper_stam(cpu_env, r1, o->in2, r3);
3981 tcg_temp_free_i32(r1);
3982 tcg_temp_free_i32(r3);
3983 return NO_EXIT;
3986 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3988 int m3 = get_field(s->fields, m3);
3989 int pos, base = s->insn->data;
3990 TCGv_i64 tmp = tcg_temp_new_i64();
3992 pos = base + ctz32(m3) * 8;
3993 switch (m3) {
3994 case 0xf:
3995 /* Effectively a 32-bit store. */
3996 tcg_gen_shri_i64(tmp, o->in1, pos);
3997 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3998 break;
4000 case 0xc:
4001 case 0x6:
4002 case 0x3:
4003 /* Effectively a 16-bit store. */
4004 tcg_gen_shri_i64(tmp, o->in1, pos);
4005 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4006 break;
4008 case 0x8:
4009 case 0x4:
4010 case 0x2:
4011 case 0x1:
4012 /* Effectively an 8-bit store. */
4013 tcg_gen_shri_i64(tmp, o->in1, pos);
4014 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4015 break;
4017 default:
4018 /* This is going to be a sequence of shifts and stores. */
4019 pos = base + 32 - 8;
4020 while (m3) {
4021 if (m3 & 0x8) {
4022 tcg_gen_shri_i64(tmp, o->in1, pos);
4023 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4024 tcg_gen_addi_i64(o->in2, o->in2, 1);
4026 m3 = (m3 << 1) & 0xf;
4027 pos -= 8;
4029 break;
4031 tcg_temp_free_i64(tmp);
4032 return NO_EXIT;
4035 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4037 int r1 = get_field(s->fields, r1);
4038 int r3 = get_field(s->fields, r3);
4039 int size = s->insn->data;
4040 TCGv_i64 tsize = tcg_const_i64(size);
4042 while (1) {
4043 if (size == 8) {
4044 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4045 } else {
4046 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4048 if (r1 == r3) {
4049 break;
4051 tcg_gen_add_i64(o->in2, o->in2, tsize);
4052 r1 = (r1 + 1) & 15;
4055 tcg_temp_free_i64(tsize);
4056 return NO_EXIT;
4059 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4061 int r1 = get_field(s->fields, r1);
4062 int r3 = get_field(s->fields, r3);
4063 TCGv_i64 t = tcg_temp_new_i64();
4064 TCGv_i64 t4 = tcg_const_i64(4);
4065 TCGv_i64 t32 = tcg_const_i64(32);
4067 while (1) {
4068 tcg_gen_shl_i64(t, regs[r1], t32);
4069 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4070 if (r1 == r3) {
4071 break;
4073 tcg_gen_add_i64(o->in2, o->in2, t4);
4074 r1 = (r1 + 1) & 15;
4077 tcg_temp_free_i64(t);
4078 tcg_temp_free_i64(t4);
4079 tcg_temp_free_i64(t32);
4080 return NO_EXIT;
4083 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4085 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
4086 set_cc_static(s);
4087 return_low128(o->in2);
4088 return NO_EXIT;
4091 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4093 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4094 return NO_EXIT;
4097 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4099 DisasCompare cmp;
4100 TCGv_i64 borrow;
4102 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4104 /* The !borrow flag is the msb of CC. Since we want the inverse of
4105 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4106 disas_jcc(s, &cmp, 8 | 4);
4107 borrow = tcg_temp_new_i64();
4108 if (cmp.is_64) {
4109 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4110 } else {
4111 TCGv_i32 t = tcg_temp_new_i32();
4112 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4113 tcg_gen_extu_i32_i64(borrow, t);
4114 tcg_temp_free_i32(t);
4116 free_compare(&cmp);
4118 tcg_gen_sub_i64(o->out, o->out, borrow);
4119 tcg_temp_free_i64(borrow);
4120 return NO_EXIT;
4123 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4125 TCGv_i32 t;
4127 update_psw_addr(s);
4128 update_cc_op(s);
4130 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4131 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4132 tcg_temp_free_i32(t);
4134 t = tcg_const_i32(s->ilen);
4135 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4136 tcg_temp_free_i32(t);
4138 gen_exception(EXCP_SVC);
4139 return EXIT_NORETURN;
4142 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4144 int cc = 0;
4146 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4147 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4148 gen_op_movi_cc(s, cc);
4149 return NO_EXIT;
4152 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4154 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4155 set_cc_static(s);
4156 return NO_EXIT;
4159 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4161 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4162 set_cc_static(s);
4163 return NO_EXIT;
4166 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4168 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4169 set_cc_static(s);
4170 return NO_EXIT;
4173 #ifndef CONFIG_USER_ONLY
4175 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4177 check_privileged(s);
4178 gen_helper_testblock(cc_op, cpu_env, o->in2);
4179 set_cc_static(s);
4180 return NO_EXIT;
4183 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4185 gen_helper_tprot(cc_op, o->addr1, o->in2);
4186 set_cc_static(s);
4187 return NO_EXIT;
4190 #endif
4192 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4194 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4195 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4196 tcg_temp_free_i32(l);
4197 set_cc_static(s);
4198 return NO_EXIT;
4201 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4203 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4204 return_low128(o->out2);
4205 set_cc_static(s);
4206 return NO_EXIT;
4209 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4211 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4212 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4213 tcg_temp_free_i32(l);
4214 set_cc_static(s);
4215 return NO_EXIT;
4218 static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4220 TCGv_i32 t1 = tcg_const_i32(0xff);
4221 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4222 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4223 tcg_temp_free_i32(t1);
4224 set_cc_static(s);
4225 return NO_EXIT;
4228 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4230 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4231 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4232 tcg_temp_free_i32(l);
4233 return NO_EXIT;
4236 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4238 int d1 = get_field(s->fields, d1);
4239 int d2 = get_field(s->fields, d2);
4240 int b1 = get_field(s->fields, b1);
4241 int b2 = get_field(s->fields, b2);
4242 int l = get_field(s->fields, l1);
4243 TCGv_i32 t32;
4245 o->addr1 = get_address(s, 0, b1, d1);
4247 /* If the addresses are identical, this is a store/memset of zero. */
4248 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4249 o->in2 = tcg_const_i64(0);
4251 l++;
4252 while (l >= 8) {
4253 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4254 l -= 8;
4255 if (l > 0) {
4256 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4259 if (l >= 4) {
4260 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4261 l -= 4;
4262 if (l > 0) {
4263 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4266 if (l >= 2) {
4267 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4268 l -= 2;
4269 if (l > 0) {
4270 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4273 if (l) {
4274 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4276 gen_op_movi_cc(s, 0);
4277 return NO_EXIT;
4280 /* But in general we'll defer to a helper. */
4281 o->in2 = get_address(s, 0, b2, d2);
4282 t32 = tcg_const_i32(l);
4283 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4284 tcg_temp_free_i32(t32);
4285 set_cc_static(s);
4286 return NO_EXIT;
4289 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4291 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4292 return NO_EXIT;
4295 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4297 int shift = s->insn->data & 0xff;
4298 int size = s->insn->data >> 8;
4299 uint64_t mask = ((1ull << size) - 1) << shift;
4301 assert(!o->g_in2);
4302 tcg_gen_shli_i64(o->in2, o->in2, shift);
4303 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4305 /* Produce the CC from only the bits manipulated. */
4306 tcg_gen_andi_i64(cc_dst, o->out, mask);
4307 set_cc_nz_u64(s, cc_dst);
4308 return NO_EXIT;
4311 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4313 o->out = tcg_const_i64(0);
4314 return NO_EXIT;
4317 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4319 o->out = tcg_const_i64(0);
4320 o->out2 = o->out;
4321 o->g_out2 = true;
4322 return NO_EXIT;
4325 /* ====================================================================== */
4326 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4327 the original inputs), update the various cc data structures in order to
4328 be able to compute the new condition code. */
4330 static void cout_abs32(DisasContext *s, DisasOps *o)
4332 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4335 static void cout_abs64(DisasContext *s, DisasOps *o)
4337 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4340 static void cout_adds32(DisasContext *s, DisasOps *o)
4342 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4345 static void cout_adds64(DisasContext *s, DisasOps *o)
4347 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4350 static void cout_addu32(DisasContext *s, DisasOps *o)
4352 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4355 static void cout_addu64(DisasContext *s, DisasOps *o)
4357 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4360 static void cout_addc32(DisasContext *s, DisasOps *o)
4362 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4365 static void cout_addc64(DisasContext *s, DisasOps *o)
4367 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4370 static void cout_cmps32(DisasContext *s, DisasOps *o)
4372 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4375 static void cout_cmps64(DisasContext *s, DisasOps *o)
4377 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4380 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4382 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4385 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4387 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4390 static void cout_f32(DisasContext *s, DisasOps *o)
4392 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4395 static void cout_f64(DisasContext *s, DisasOps *o)
4397 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4400 static void cout_f128(DisasContext *s, DisasOps *o)
4402 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4405 static void cout_nabs32(DisasContext *s, DisasOps *o)
4407 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4410 static void cout_nabs64(DisasContext *s, DisasOps *o)
4412 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4415 static void cout_neg32(DisasContext *s, DisasOps *o)
4417 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4420 static void cout_neg64(DisasContext *s, DisasOps *o)
4422 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4425 static void cout_nz32(DisasContext *s, DisasOps *o)
4427 tcg_gen_ext32u_i64(cc_dst, o->out);
4428 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4431 static void cout_nz64(DisasContext *s, DisasOps *o)
4433 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4436 static void cout_s32(DisasContext *s, DisasOps *o)
4438 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4441 static void cout_s64(DisasContext *s, DisasOps *o)
4443 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4446 static void cout_subs32(DisasContext *s, DisasOps *o)
4448 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4451 static void cout_subs64(DisasContext *s, DisasOps *o)
4453 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4456 static void cout_subu32(DisasContext *s, DisasOps *o)
4458 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4461 static void cout_subu64(DisasContext *s, DisasOps *o)
4463 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4466 static void cout_subb32(DisasContext *s, DisasOps *o)
4468 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4471 static void cout_subb64(DisasContext *s, DisasOps *o)
4473 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4476 static void cout_tm32(DisasContext *s, DisasOps *o)
4478 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4481 static void cout_tm64(DisasContext *s, DisasOps *o)
4483 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4486 /* ====================================================================== */
4487 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4488 with the TCG register to which we will write. Used in combination with
4489 the "wout" generators, in some cases we need a new temporary, and in
4490 some cases we can write to a TCG global. */
4492 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4494 o->out = tcg_temp_new_i64();
4496 #define SPEC_prep_new 0
4498 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4500 o->out = tcg_temp_new_i64();
4501 o->out2 = tcg_temp_new_i64();
4503 #define SPEC_prep_new_P 0
4505 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4507 o->out = regs[get_field(f, r1)];
4508 o->g_out = true;
4510 #define SPEC_prep_r1 0
4512 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4514 int r1 = get_field(f, r1);
4515 o->out = regs[r1];
4516 o->out2 = regs[r1 + 1];
4517 o->g_out = o->g_out2 = true;
4519 #define SPEC_prep_r1_P SPEC_r1_even
4521 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4523 o->out = fregs[get_field(f, r1)];
4524 o->g_out = true;
4526 #define SPEC_prep_f1 0
4528 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4530 int r1 = get_field(f, r1);
4531 o->out = fregs[r1];
4532 o->out2 = fregs[r1 + 2];
4533 o->g_out = o->g_out2 = true;
4535 #define SPEC_prep_x1 SPEC_r1_f128
4537 /* ====================================================================== */
4538 /* The "Write OUTput" generators. These generally perform some non-trivial
4539 copy of data to TCG globals, or to main memory. The trivial cases are
4540 generally handled by having a "prep" generator install the TCG global
4541 as the destination of the operation. */
4543 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4545 store_reg(get_field(f, r1), o->out);
4547 #define SPEC_wout_r1 0
4549 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4551 int r1 = get_field(f, r1);
4552 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4554 #define SPEC_wout_r1_8 0
4556 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4558 int r1 = get_field(f, r1);
4559 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4561 #define SPEC_wout_r1_16 0
4563 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4565 store_reg32_i64(get_field(f, r1), o->out);
4567 #define SPEC_wout_r1_32 0
4569 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4571 store_reg32h_i64(get_field(f, r1), o->out);
4573 #define SPEC_wout_r1_32h 0
4575 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4577 int r1 = get_field(f, r1);
4578 store_reg32_i64(r1, o->out);
4579 store_reg32_i64(r1 + 1, o->out2);
4581 #define SPEC_wout_r1_P32 SPEC_r1_even
4583 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4585 int r1 = get_field(f, r1);
4586 store_reg32_i64(r1 + 1, o->out);
4587 tcg_gen_shri_i64(o->out, o->out, 32);
4588 store_reg32_i64(r1, o->out);
4590 #define SPEC_wout_r1_D32 SPEC_r1_even
4592 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4594 int r3 = get_field(f, r3);
4595 store_reg32_i64(r3, o->out);
4596 store_reg32_i64(r3 + 1, o->out2);
4598 #define SPEC_wout_r3_P32 SPEC_r3_even
4600 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4602 int r3 = get_field(f, r3);
4603 store_reg(r3, o->out);
4604 store_reg(r3 + 1, o->out2);
4606 #define SPEC_wout_r3_P64 SPEC_r3_even
4608 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4610 store_freg32_i64(get_field(f, r1), o->out);
4612 #define SPEC_wout_e1 0
4614 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4616 store_freg(get_field(f, r1), o->out);
4618 #define SPEC_wout_f1 0
4620 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4622 int f1 = get_field(s->fields, r1);
4623 store_freg(f1, o->out);
4624 store_freg(f1 + 2, o->out2);
4626 #define SPEC_wout_x1 SPEC_r1_f128
4628 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4630 if (get_field(f, r1) != get_field(f, r2)) {
4631 store_reg32_i64(get_field(f, r1), o->out);
4634 #define SPEC_wout_cond_r1r2_32 0
4636 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4638 if (get_field(f, r1) != get_field(f, r2)) {
4639 store_freg32_i64(get_field(f, r1), o->out);
4642 #define SPEC_wout_cond_e1e2 0
4644 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4646 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4648 #define SPEC_wout_m1_8 0
4650 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4652 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4654 #define SPEC_wout_m1_16 0
4656 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4658 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4660 #define SPEC_wout_m1_32 0
4662 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4664 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4666 #define SPEC_wout_m1_64 0
4668 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4670 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4672 #define SPEC_wout_m2_32 0
4674 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4676 store_reg(get_field(f, r1), o->in2);
4678 #define SPEC_wout_in2_r1 0
4680 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4682 store_reg32_i64(get_field(f, r1), o->in2);
4684 #define SPEC_wout_in2_r1_32 0
4686 /* ====================================================================== */
4687 /* The "INput 1" generators. These load the first operand to an insn. */
4689 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4691 o->in1 = load_reg(get_field(f, r1));
4693 #define SPEC_in1_r1 0
4695 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4697 o->in1 = regs[get_field(f, r1)];
4698 o->g_in1 = true;
4700 #define SPEC_in1_r1_o 0
4702 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4704 o->in1 = tcg_temp_new_i64();
4705 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4707 #define SPEC_in1_r1_32s 0
4709 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4711 o->in1 = tcg_temp_new_i64();
4712 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4714 #define SPEC_in1_r1_32u 0
4716 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4718 o->in1 = tcg_temp_new_i64();
4719 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4721 #define SPEC_in1_r1_sr32 0
4723 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4725 o->in1 = load_reg(get_field(f, r1) + 1);
4727 #define SPEC_in1_r1p1 SPEC_r1_even
4729 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4731 o->in1 = tcg_temp_new_i64();
4732 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4734 #define SPEC_in1_r1p1_32s SPEC_r1_even
4736 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4738 o->in1 = tcg_temp_new_i64();
4739 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4741 #define SPEC_in1_r1p1_32u SPEC_r1_even
4743 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4745 int r1 = get_field(f, r1);
4746 o->in1 = tcg_temp_new_i64();
4747 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4749 #define SPEC_in1_r1_D32 SPEC_r1_even
4751 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4753 o->in1 = load_reg(get_field(f, r2));
4755 #define SPEC_in1_r2 0
4757 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4759 o->in1 = tcg_temp_new_i64();
4760 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4762 #define SPEC_in1_r2_sr32 0
4764 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4766 o->in1 = load_reg(get_field(f, r3));
4768 #define SPEC_in1_r3 0
4770 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4772 o->in1 = regs[get_field(f, r3)];
4773 o->g_in1 = true;
4775 #define SPEC_in1_r3_o 0
4777 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4779 o->in1 = tcg_temp_new_i64();
4780 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4782 #define SPEC_in1_r3_32s 0
4784 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4786 o->in1 = tcg_temp_new_i64();
4787 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4789 #define SPEC_in1_r3_32u 0
4791 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4793 int r3 = get_field(f, r3);
4794 o->in1 = tcg_temp_new_i64();
4795 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4797 #define SPEC_in1_r3_D32 SPEC_r3_even
4799 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4801 o->in1 = load_freg32_i64(get_field(f, r1));
4803 #define SPEC_in1_e1 0
4805 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4807 o->in1 = fregs[get_field(f, r1)];
4808 o->g_in1 = true;
4810 #define SPEC_in1_f1_o 0
4812 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4814 int r1 = get_field(f, r1);
4815 o->out = fregs[r1];
4816 o->out2 = fregs[r1 + 2];
4817 o->g_out = o->g_out2 = true;
4819 #define SPEC_in1_x1_o SPEC_r1_f128
4821 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4823 o->in1 = fregs[get_field(f, r3)];
4824 o->g_in1 = true;
4826 #define SPEC_in1_f3_o 0
4828 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4830 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4832 #define SPEC_in1_la1 0
4834 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4836 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4837 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4839 #define SPEC_in1_la2 0
4841 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4843 in1_la1(s, f, o);
4844 o->in1 = tcg_temp_new_i64();
4845 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4847 #define SPEC_in1_m1_8u 0
4849 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4851 in1_la1(s, f, o);
4852 o->in1 = tcg_temp_new_i64();
4853 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4855 #define SPEC_in1_m1_16s 0
4857 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4859 in1_la1(s, f, o);
4860 o->in1 = tcg_temp_new_i64();
4861 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4863 #define SPEC_in1_m1_16u 0
4865 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4867 in1_la1(s, f, o);
4868 o->in1 = tcg_temp_new_i64();
4869 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4871 #define SPEC_in1_m1_32s 0
4873 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4875 in1_la1(s, f, o);
4876 o->in1 = tcg_temp_new_i64();
4877 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4879 #define SPEC_in1_m1_32u 0
4881 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4883 in1_la1(s, f, o);
4884 o->in1 = tcg_temp_new_i64();
4885 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4887 #define SPEC_in1_m1_64 0
4889 /* ====================================================================== */
4890 /* The "INput 2" generators. These load the second operand to an insn. */
/* Second operand is register r1 used in place (no copy); g_in2 tells
   translate_one's cleanup not to free it as a temporary. */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Second operand is the low 16 bits of r1, zero-extended. */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Second operand is the low 32 bits of r1, zero-extended. */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Second operand is the 64-bit value formed from the register pair:
   r1 supplies the high 32 bits, r1+1 the low 32 bits.  The SPEC flag
   enforces that r1 names an even register. */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even
/* Second operand is a fresh copy of register r2. */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Second operand is register r2 used in place; g_in2 protects it from
   being freed as a temporary. */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Second operand is a copy of r2, but only when r2 is non-zero;
   otherwise o->in2 is deliberately left unset. */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Low 8 bits of r2, sign-extended. */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

/* Low 8 bits of r2, zero-extended. */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* Low 16 bits of r2, sign-extended. */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* Low 16 bits of r2, zero-extended. */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Second operand is a fresh copy of register r3. */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* High 32 bits of r3, shifted down to the low half. */
static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

/* Low 32 bits of r2, sign-extended. */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

/* Low 32 bits of r2, zero-extended. */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* High 32 bits of r2, shifted down to the low half. */
static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
/* Second operand is the short (32-bit) float register r2, loaded via
   load_freg32_i64 into a fresh i64 temp. */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Second operand is float register r2 used in place (no copy). */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit float operand: the pair fregs[r2] / fregs[r2 + 2] supplies
   both in1 and in2, used in place.  SPEC_r2_f128 restricts r2 to the
   valid extended-format register pairs. */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Second operand is an address taken from register r2 (no index or
   displacement), adjusted for the current addressing mode. */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0
/* Second operand is the effective address b2+d2, plus index x2 when the
   instruction format carries one. */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* Second operand is the PC-relative address pc + 2*i2 (i2 counts
   halfwords). */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Second operand is a shift count for 32-bit shifts, masked to 0..31
   by help_l2_shift. */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* As above for 64-bit shifts: count masked to 0..63. */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
/* Memory second operand: compute the effective address with in2_a2,
   then overwrite o->in2 in place with the loaded value, zero-extended
   from 8 bits. */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

/* 16-bit memory operand, sign-extended. */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

/* 16-bit memory operand, zero-extended. */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

/* 32-bit memory operand, sign-extended. */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

/* 32-bit memory operand, zero-extended. */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

/* Full 64-bit memory operand. */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0
/* PC-relative memory second operand: form pc + 2*i2 with in2_ri2, then
   replace o->in2 with the 16-bit value loaded from there, zero-extended. */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

/* PC-relative 32-bit load, sign-extended. */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

/* PC-relative 32-bit load, zero-extended. */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

/* PC-relative 64-bit load. */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
/* Second operand is the immediate i2 as extracted (already signed or
   unsigned per the field descriptor). */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Immediate i2 truncated to 8 bits, unsigned. */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

/* Immediate i2 truncated to 16 bits, unsigned. */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

/* Immediate i2 truncated to 32 bits, unsigned. */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Unsigned 16-bit immediate shifted left by the per-insn data amount
   (s->insn->data); used for insert/or-immediate style operations. */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* As above with a 32-bit unsigned immediate. */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
#ifndef CONFIG_USER_ONLY
/* Second operand is the raw, left-aligned instruction bits that
   extract_insn stored in f->raw_insn. */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
5174 /* ====================================================================== */
5176 /* Find opc within the table of insns. This is formulated as a switch
5177 statement so that (1) we get compile-time notice of cut-paste errors
5178 for duplicated opcodes, and (2) the compiler generates the binary
5179 search tree, rather than us having to post-process the table. */
/* C() is D() with a zero data field; insn-data.def uses one or the
   other per entry.  The .def file is expanded three times below with
   different definitions of D. */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion: enumerate the insn names, giving each an index
   into insn_info[]. */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the descriptor table, wiring each entry to
   the in1/in2/prep/wout/cout/op helpers defined above and or'ing
   together the specification-exception flags. */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: one switch case per opcode, returning the matching
   table entry. */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
5243 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5245 uint32_t r, m;
5247 if (f->size == 0) {
5248 return;
5251 /* Zero extract the field from the insn. */
5252 r = (insn << f->beg) >> (64 - f->size);
5254 /* Sign-extend, or un-swap the field as necessary. */
5255 switch (f->type) {
5256 case 0: /* unsigned */
5257 break;
5258 case 1: /* signed */
5259 assert(f->size <= 32);
5260 m = 1u << (f->size - 1);
5261 r = (r ^ m) - m;
5262 break;
5263 case 2: /* dl+dh split, signed 20 bit. */
5264 r = ((int8_t)r << 12) | (r >> 8);
5265 break;
5266 default:
5267 abort();
5270 /* Validate that the "compressed" encoding we selected above is valid.
5271 I.e. we havn't make two different original fields overlap. */
5272 assert(((o->presentC >> f->indexC) & 1) == 0);
5273 o->presentC |= 1 << f->indexC;
5274 o->presentO |= 1 << f->indexO;
5276 o->c[f->indexC] = r;
5279 /* Lookup the insn at the current PC, extracting the operands into O and
5280 returning the info struct for the insn. Returns NULL for invalid insn. */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE: the insn bits are kept
           left-aligned in the high 48 bits, the length in the low nibble.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;              /* primary opcode is the top byte */
    } else {
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);          /* length is implied by the opcode */
        switch (ilen) {
        case 2:
            /* Left-align the 16-bit insn.  */
            insn = insn << 48;
            break;
        case 4:
            /* Re-read all 4 bytes and left-align.  */
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            /* First halfword already read; append the next 4 bytes.  */
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Translate a single instruction at s->pc: decode it, check its
   specification-exception constraints, then run the table-driven
   in1/in2/prep/op/wout/cout helper pipeline.  Returns the exit status
   reported by the op helper (NO_EXIT to continue the TB). */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* PER tracing: report the fetch of this insn to the helper.  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers, except those marked
       global (g_*), which alias long-lived TCG values.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc.  */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
/* Translate a basic block: repeatedly call translate_one until an
   instruction ends the TB, a page boundary is crossed, or the insn
   budget is exhausted, then emit the TB epilogue. */
void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    /* The EXECUTE target, if any, is passed in via cs_base.  */
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    do {
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc)) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(psw_addr);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory.  */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}
5637 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5638 target_ulong *data)
5640 int cc_op = data[1];
5641 env->psw.addr = data[0];
5642 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5643 env->cc_op = cc_op;