target/s390x: change PSW_SHIFT_KEY
[qemu/ar7.git] / target / s390x / translate.c
blob 6ebfb9742482326c16fe6f825ff5e4486654507b
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* Per-instruction disassembly trace; compiled out unless VERBOSE is set.  */
#ifdef S390X_DEBUG_DISAS_VERBOSE
# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
48 #include "exec/log.h"
51 /* Information that (most) every instruction needs to manipulate. */
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;  /* TB currently being translated */
    const DisasInsn *insn;        /* decode-table entry for the current insn */
    DisasFields *fields;          /* decoded operand fields of the insn */
    uint64_t ex_value;            /* NOTE(review): presumably the insn value
                                     supplied by EXECUTE — confirm at use site */
    uint64_t pc, next_pc;         /* address of current and next insn */
    uint32_t ilen;                /* length in bytes of the current insn */
    enum cc_op cc_op;             /* how the condition code is to be computed */
    bool singlestep_enabled;
};
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;     /* operands live in u.s64 rather than u.s32 */
    bool g1;        /* u.*.a is a TCG global — do not free it */
    bool g2;        /* u.*.b is a TCG global — do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#define DISAS_EXCP 4

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counts of branches compiled inline vs. via the calc_cc helper.  */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
86 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 if (!(s->tb->flags & FLAG_MASK_64)) {
89 if (s->tb->flags & FLAG_MASK_32) {
90 return pc | 0x80000000;
93 return pc;
/* Dump PSW, general, floating-point, vector and (softmmu) control
   registers of CS to F, four per line.  A symbolic cc name is printed
   while the cc is still in un-computed (cc_op > 3) form.  */
void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int i;

    if (env->cc_op > 3) {
        /* cc not yet evaluated; show the pending computation by name.  */
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
    } else {
        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
                    env->psw.mask, env->psw.addr, env->cc_op);
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    /* Vector registers are 128 bits wide: two 64-bit halves each.  */
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
                    env->vregs[i][0].ll, env->vregs[i][1].ll);
        cpu_fprintf(f, (i % 2) ? "\n" : " ");
    }

#ifndef CONFIG_USER_ONLY
    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
#endif

#ifdef DEBUG_INLINE_BRANCHES
    for (i = 0; i < CC_OP_MAX; i++) {
        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
                    inline_branch_miss[i], inline_branch_hit[i]);
    }
#endif

    cpu_fprintf(f, "\n");
}
/* TCG globals mirroring fields of CPUS390XState; created in
   s390x_translate_init() below.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;           /* PER breaking-event address */

static TCGv_i32 cc_op;          /* pending cc computation selector */
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;          /* result value for 3-operand cc ops */

/* Backing storage for the register names handed to TCG ("r0".."f15").  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
/* One-time creation of the TCG globals declared above.  Called once at
   CPU-model initialization.  */
void s390x_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* FP registers overlay the high half of the first 16 vector regs.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
209 static TCGv_i64 load_reg(int reg)
211 TCGv_i64 r = tcg_temp_new_i64();
212 tcg_gen_mov_i64(r, regs[reg]);
213 return r;
216 static TCGv_i64 load_freg32_i64(int reg)
218 TCGv_i64 r = tcg_temp_new_i64();
219 tcg_gen_shri_i64(r, fregs[reg], 32);
220 return r;
223 static void store_reg(int reg, TCGv_i64 v)
225 tcg_gen_mov_i64(regs[reg], v);
228 static void store_freg(int reg, TCGv_i64 v)
230 tcg_gen_mov_i64(fregs[reg], v);
233 static void store_reg32_i64(int reg, TCGv_i64 v)
235 /* 32 bit register writes keep the upper half */
236 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
239 static void store_reg32h_i64(int reg, TCGv_i64 v)
241 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
244 static void store_freg32_i64(int reg, TCGv_i64 v)
246 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
249 static void return_low128(TCGv_i64 dest)
251 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
254 static void update_psw_addr(DisasContext *s)
256 /* psw.addr */
257 tcg_gen_movi_i64(psw_addr, s->pc);
/* Record an unconditional branch from the current insn for PER.
   TO_NEXT means the destination is the next sequential insn; otherwise
   the destination has already been written to psw_addr.  No-op in
   user-only builds.  */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    /* Always track the breaking-event address.  */
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* Only the constant we created ourselves may be freed.  */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
/* Record a conditional branch (taken when COND holds for ARG1/ARG2)
   for PER; the taken destination must already be in psw_addr.  When PER
   is disabled only the breaking-event address is updated, via movcond.
   No-op in user-only builds.  */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the PER notification when the branch is not taken.  */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* Conditionally update gbea without branching.  */
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
295 static void per_breaking_event(DisasContext *s)
297 tcg_gen_movi_i64(gbea, s->pc);
300 static void update_cc_op(DisasContext *s)
302 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
303 tcg_gen_movi_i32(cc_op, s->cc_op);
/* Make the architectural PSW address and cc state consistent before
   emitting an operation that may fault on memory access, so the fault
   handler sees correct values.  */
static void potential_page_fault(DisasContext *s)
{
    update_psw_addr(s);
    update_cc_op(s);
}
/* Fetch 2 instruction bytes at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch 4 instruction bytes at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
/* Map the PSW address-space-control selection (cached in tb->flags) to
   the softmmu MMU index used for data accesses.  */
static int get_mem_index(DisasContext *s)
{
    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        return 0;
    case PSW_ASC_SECONDARY >> 32:
        return 1;
    case PSW_ASC_HOME >> 32:
        return 2;
    default:
        /* Access-register mode is not reachable here.  */
        tcg_abort();
        break;
    }
}
338 static void gen_exception(int excp)
340 TCGv_i32 tmp = tcg_const_i32(excp);
341 gen_helper_exception(cpu_env, tmp);
342 tcg_temp_free_i32(tmp);
/* Raise program exception CODE for the current instruction, recording
   the exception code and instruction length, and syncing PSW address
   and cc so the exception handler sees consistent state.  */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
368 static inline void gen_illegal_opcode(DisasContext *s)
370 gen_program_exception(s, PGM_OPERATION);
/* Raise a PGM_DATA program exception with DXC 0xff.  The DXC is kept in
   byte 1 of the FPC register, hence the 0xff00 mask.  */
static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}
#ifndef CONFIG_USER_ONLY
/* Raise a privileged-operation exception if the CPU is in problem
   state (PSW P bit set, cached shifted-down in tb->flags).  */
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif
/* Compute the base(B2) + index(X2) + displacement(D2) effective address
   into a fresh temporary, masked to 31 bits when not in 64-bit mode.
   Register number 0 means "no base/index register".  */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            /* Constant-only address: mask at compile time.  */
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
426 static inline bool live_cc_data(DisasContext *s)
428 return (s->cc_op != CC_OP_DYNAMIC
429 && s->cc_op != CC_OP_STATIC
430 && s->cc_op > 3);
/* Set the cc to the constant VAL (0..3), discarding any data still
   held by the cc globals.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
/* Record a deferred one-operand cc computation OP; only cc_dst is used,
   so discard whatever the other cc globals held.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a deferred two-operand cc computation OP in cc_src/cc_dst.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
464 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
465 TCGv_i64 dst, TCGv_i64 vr)
467 tcg_gen_mov_i64(cc_src, src);
468 tcg_gen_mov_i64(cc_dst, dst);
469 tcg_gen_mov_i64(cc_vr, vr);
470 s->cc_op = op;
/* Convenience wrappers for common deferred-cc patterns.  */

/* cc from a 64-bit integer result: zero / non-zero.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit float result.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit float result.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit float result passed as two halves.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
/* calculates cc into cc_op */
/* Materialize the deferred cc computation: emit a call to the calc_cc
   helper with however many of cc_src/cc_dst/cc_vr the current cc_op
   consumes, then mark the cc as statically computed.  */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op;
    TCGv_i64 dummy;

    TCGV_UNUSED_I32(local_cc_op);
    TCGV_UNUSED_I64(dummy);
    /* First pass: allocate the constants each case needs.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed, or cc_op comes from env.  */
        break;
    }

    /* Second pass: emit the computation.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (!TCGV_IS_UNUSED_I64(dummy)) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
612 static bool use_exit_tb(DisasContext *s)
614 return (s->singlestep_enabled ||
615 (s->tb->cflags & CF_LAST_IO) ||
616 (s->tb->flags & FLAG_MASK_PER));
/* True when a direct goto_tb to DEST is permitted: not forced to exit,
   and (in system mode) DEST lies on the same page as either the start
   of this TB or the current instruction.  */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
/* Debug accounting: a branch that could not be compiled inline and had
   to go through gen_op_calc_cc.  No-op unless DEBUG_INLINE_BRANCHES.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug accounting: a branch compiled to an inline TCG comparison.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  The index is the 4-bit branch
   mask; bit 0 (the CC=3 column, marked "x") is a don't-care, which is
   why entries come in identical pairs.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible; the low two mask
   bits are don't-cares, giving runs of four identical entries.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  The first switch maps
   (cc_op, mask) to a TCG condition where the cc computation can be
   folded into an inline comparison; otherwise the cc is materialized
   and the comparison is made against the computed cc value.  The second
   switch loads the comparison operands and sets the g1/g2 ownership
   flags consumed later by free_compare().  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivial always/never masks need no operands at all.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (src & dst) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already computed; compare against it.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
/* Release the comparison operands of C that are temporaries; operands
   flagged g1/g2 are TCG globals and must not be freed.  */
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
/* ====================================================================== */
/* Define the insn format enumeration.  Each FMT_* constant is generated
   from the first argument of the F0..F5 entries in insn-format.def.  */
#define F0(N)                     FMT_##N,
#define F1(N, X1)                 F0(N)
#define F2(N, X1, X2)             F0(N)
#define F3(N, X1, X2, X3)         F0(N)
#define F4(N, X1, X2, X3, X4)     F0(N)
#define F5(N, X1, X2, X3, X4, X5) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact slot assignment: fields mapping to the same slot number never
   co-exist in a single instruction format.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;       /* the undecoded instruction bytes */
    unsigned op:8;           /* primary opcode */
    unsigned op2:8;          /* secondary opcode, where the format has one */
    unsigned presentC:16;    /* bitmap of occupied compact slots */
    unsigned int presentO;   /* bitmap over enum DisasFieldIndexO */
    int c[NUM_C_FIELD];      /* decoded field values, compact-indexed */
};
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original-index field C was decoded for this insn.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Return the value of the field at compact slot C; the original index O
   is only used to assert the field is actually present.  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;       /* first bit position within the insn */
    unsigned int size:8;      /* width of the field in bits */
    unsigned int type:2;      /* extraction kind: 0 unsigned, 1 signed imm,
                                 2 long displacement (per the macros below) */
    unsigned int indexC:6;    /* compact storage slot */
    enum DisasFieldIndexO indexO:8;  /* original field index */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

/* Field-layout building blocks used inside insn-format.def.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

/* One layout entry per format, indexed by DisasFormat.  */
static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;  /* the matching TCGv is a global;
                                          do not free it */
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* One entry of the instruction decode table.  */
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    unsigned fac:8;   /* NOTE(review): presumably the required facility
                         number — confirm against the insn-data table */
    unsigned spec:8;  /* SPEC_* operand constraints, or 0 */

    const char *name;

    /* Pipeline of helpers: load inputs, prep output, do the op, write
       outputs, compute cc.  Any of them may be NULL.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;    /* opaque per-insn datum passed to the helpers */
};
1201 /* ====================================================================== */
1202 /* Miscellaneous helpers, used by several operations. */
1204 static void help_l2_shift(DisasContext *s, DisasFields *f,
1205 DisasOps *o, int mask)
1207 int b2 = get_field(f, b2);
1208 int d2 = get_field(f, d2);
1210 if (b2 == 0) {
1211 o->in2 = tcg_const_i64(d2 & mask);
1212 } else {
1213 o->in2 = get_address(s, 0, b2, d2);
1214 tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Emit an unconditional branch to DEST, picking the cheapest exit:
   fall through when DEST is the next insn, chain via goto_tb when
   permitted, else store the PC and take a full exit.  */
static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}
/* Emit a conditional branch described by C.  For IS_IMM the target is
   pc-relative (IMM halfwords); otherwise it is the register/address value
   CDEST.  Degenerate conditions (never/always, branch-to-next, bcr %r0)
   are special-cased, then one of three code shapes is chosen depending on
   which exits may use goto_tb.  Always consumes (frees) *C.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison to 64 bits for movcond.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1369 /* ====================================================================== */
1370 /* The operations. These perform the bulk of the work for any insn,
1371 usually after the operands have been loaded and output initialized. */
1373 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1375 TCGv_i64 z, n;
1376 z = tcg_const_i64(0);
1377 n = tcg_temp_new_i64();
1378 tcg_gen_neg_i64(n, o->in2);
1379 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1380 tcg_temp_free_i64(n);
1381 tcg_temp_free_i64(z);
1382 return NO_EXIT;
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
/* LOAD POSITIVE (long BFP): clear the sign bit of the 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
/* LOAD POSITIVE (extended BFP): clear the sign bit in the high half,
   pass the low half through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* Integer ADD; condition code is produced by the insn's cc hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD WITH CARRY: out = in1 + in2 + carry-from-previous-CC.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the setcond result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
1437 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1439 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1440 return NO_EXIT;
/* ADD (long BFP) via softfloat helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD (extended BFP): 128-bit result returned as high half in out,
   low half fetched from the helper's retxl slot.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Bitwise AND; CC comes from the insn's cc hook.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND IMMEDIATE on a sub-field of the register (NIHH/NIHL/NILH/NILL...):
   insn->data encodes the field as (size << 8) | shift.  Bits outside the
   field are forced to 1 in the mask operand so they pass through.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* BRANCH AND SAVE: store the link info in R1, then branch to the address
   in in2; with in2 unused (r2 == 0) only the link is stored.  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
/* BRANCH RELATIVE AND SAVE: store link info, branch pc-relative by i2
   halfwords.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* BRANCH ON CONDITION (BC/BCR/BRC...): m1 is the condition mask.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of R1 and branch
   if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT HIGH: decrement the high 32 bits of R1 and branch
   if the result is non-zero; the target is always pc-relative.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* BRANCH ON COUNT (64-bit): decrement R1 in place (global reg, g1 set)
   and branch if non-zero.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXH/BXLE): R1 += R3, compare against the
   limit in R3|1; insn->data selects LE (bxle) vs GT (bxh).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG).  When r1 == r3|1 the comparand
   must be copied before the add below clobbers it.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* The addition would overwrite the limit; use a local copy.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH / COMPARE AND TRAP-style relatives (CRJ etc.):
   m3 selects the comparison; insn->data non-zero means unsigned.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: branch target comes from b4+d4.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* COMPARE (short BFP): helper sets cc_op directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE (long BFP): helper sets cc_op directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE (extended BFP): helper sets cc_op directly.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32 <- short BFP); m3 mask field goes to the helper.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32 <- long BFP).  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (32 <- extended BFP); in1:in2 hold the 128-bit value.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64 <- short BFP).  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64 <- long BFP).  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO FIXED (64 <- extended BFP).  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (u32 <- short BFP).  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (u32 <- long BFP).  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (u32 <- extended BFP).  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (u64 <- short BFP).  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (u64 <- long BFP).  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CONVERT TO LOGICAL (u64 <- extended BFP).  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (short BFP <- s64).  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (long BFP <- s64).  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM FIXED (extended BFP <- s64); 128-bit result split across
   out/out2.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (short BFP <- u64).  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (long BFP <- u64).  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CONVERT FROM LOGICAL (extended BFP <- u64); 128-bit result split
   across out/out2.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM: helper computes the sum and returns the number of bytes
   consumed; R2/R2+1 (address/length pair) are advanced accordingly.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 bytes
   inline two loads and a LTUGTU comparison; otherwise call the helper.
   Note l1 is the length minus one.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: byte-wise comparison in the helper.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG: operands are even/odd register pairs, so both
   register numbers must be even or the insn is a specification exception.  */
static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED: r1/r3 are register pairs (must be even).  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG UNICODE: r1/r3 are register pairs (must be even).  */
static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the selected
   bytes of the low 32 bits of in1 against memory at in2.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING: terminator byte in regs[0]; helper returns
   updated addresses (second one via the low-128 slot).  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1979 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1981 TCGv_i64 t = tcg_temp_new_i64();
1982 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1983 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1984 tcg_gen_or_i64(o->out, o->out, t);
1985 tcg_temp_free_i64(t);
1986 return NO_EXIT;
/* COMPARE AND SWAP (CS/CSY/CSG): atomic cmpxchg at b2+d2.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (CDSG, 128-bit): done entirely in the helper,
   which receives the register pair numbers.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
2036 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): cmpxchg of 32 or 64 bits
   (selected by insn->data), then a full TLB purge if the swap succeeded
   and bit 63 of R2 was set.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Align the address down to the access size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return NO_EXIT;
}
2080 #endif
2082 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2084 TCGv_i64 t1 = tcg_temp_new_i64();
2085 TCGv_i32 t2 = tcg_temp_new_i32();
2086 tcg_gen_extrl_i64_i32(t2, o->in1);
2087 gen_helper_cvd(t1, t2);
2088 tcg_temp_free_i32(t2);
2089 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2090 tcg_temp_free_i64(t1);
2091 return NO_EXIT;
2094 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2096 int m3 = get_field(s->fields, m3);
2097 TCGLabel *lab = gen_new_label();
2098 TCGCond c;
2100 c = tcg_invert_cond(ltgt_cond[m3]);
2101 if (s->insn->data) {
2102 c = tcg_unsigned_cond(c);
2104 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2106 /* Trap. */
2107 gen_trap(s);
2109 gen_set_label(lab);
2110 return NO_EXIT;
2113 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): state is synced (PSW address, CC) before the
   helper, since it may interact with the outside world.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2131 #endif
/* DIVIDE (32-bit signed): quotient/remainder pair from the helper.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE LOGICAL (32-bit unsigned).  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE (64-bit signed).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE LOGICAL (64-bit unsigned, 128-bit dividend in out:out2).  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DIVIDE (short BFP) via softfloat helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* DIVIDE (long BFP) via softfloat helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* DIVIDE (extended BFP); 128-bit result split across out/out2.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS REGISTER: read aregs[r2] from env.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
/* EXTRACT CPU ATTRIBUTE: cache topology is not modelled, return -1.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
/* EXTRACT FPC: read the floating-point control register from env.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: R1 gets psw_mask bits 0-31, R2 (if non-zero) bits 32-63.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: run the target insn at in2 with its second byte OR'd with
   bits 56-63 of R1.  The helper stores the modified insn in ex_value;
   nesting an EXECUTE inside an EXECUTE is a program exception.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        /* R1 == 0 means no modification of the target insn.  */
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}
2250 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2252 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2253 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2254 tcg_temp_free_i32(m3);
2255 return NO_EXIT;
/* LOAD FP INTEGER (long BFP).  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (extended BFP); 128-bit result split across out/out2.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
2275 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2277 /* We'll use the original input for cc computation, since we get to
2278 compare that against 0, which ought to be better than comparing
2279 the real output against 64. It also lets cc_dst be a convenient
2280 temporary during our computation. */
2281 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2283 /* R1 = IN ? CLZ(IN) : 64. */
2284 tcg_gen_clzi_i64(o->out, o->in2, 64);
2286 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2287 value by 64, which is undefined. But since the shift is 64 iff the
2288 input is zero, we still get the correct result after and'ing. */
2289 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2290 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2291 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2292 return NO_EXIT;
2295 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2297 int m3 = get_field(s->fields, m3);
2298 int pos, len, base = s->insn->data;
2299 TCGv_i64 tmp = tcg_temp_new_i64();
2300 uint64_t ccm;
2302 switch (m3) {
2303 case 0xf:
2304 /* Effectively a 32-bit load. */
2305 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2306 len = 32;
2307 goto one_insert;
2309 case 0xc:
2310 case 0x6:
2311 case 0x3:
2312 /* Effectively a 16-bit load. */
2313 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2314 len = 16;
2315 goto one_insert;
2317 case 0x8:
2318 case 0x4:
2319 case 0x2:
2320 case 0x1:
2321 /* Effectively an 8-bit load. */
2322 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2323 len = 8;
2324 goto one_insert;
2326 one_insert:
2327 pos = base + ctz32(m3) * 8;
2328 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2329 ccm = ((1ull << len) - 1) << pos;
2330 break;
2332 default:
2333 /* This is going to be a sequence of loads and inserts. */
2334 pos = base + 32 - 8;
2335 ccm = 0;
2336 while (m3) {
2337 if (m3 & 0x8) {
2338 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2339 tcg_gen_addi_i64(o->in2, o->in2, 1);
2340 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2341 ccm |= 0xff << pos;
2343 m3 = (m3 << 1) & 0xf;
2344 pos -= 8;
2346 break;
2349 tcg_gen_movi_i64(tmp, ccm);
2350 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2351 tcg_temp_free_i64(tmp);
2352 return NO_EXIT;
/* Insert immediate-style field: deposit in2 into in1 at the position and
   width encoded in insn->data as (size << 8) | shift.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK: place the program mask (from psw_mask) and the
   computed CC into bits 24-31 of R1, leaving the rest intact.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask from psw_mask into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the CC into bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2382 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged), handled by helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    m4 = tcg_const_i32(get_field(s->fields, m4));
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}
/* INSERT STORAGE KEY EXTENDED (privileged), handled by helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2400 #endif
2402 static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2404 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2405 set_cc_static(s);
2406 return NO_EXIT;
/* COMPARE AND SIGNAL (long BFP).  */
static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE AND SIGNAL (extended BFP).  */
static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2423 static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2425 /* The real output is indeed the original value in memory;
2426 recompute the addition for the computation of CC. */
2427 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2428 s->insn->data | MO_ALIGN);
2429 /* However, we need to recompute the addition for setting CC. */
2430 tcg_gen_add_i64(o->out, o->in1, o->in2);
2431 return NO_EXIT;
2434 static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2436 /* The real output is indeed the original value in memory;
2437 recompute the addition for the computation of CC. */
2438 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2439 s->insn->data | MO_ALIGN);
2440 /* However, we need to recompute the operation for setting CC. */
2441 tcg_gen_and_i64(o->out, o->in1, o->in2);
2442 return NO_EXIT;
2445 static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2447 /* The real output is indeed the original value in memory;
2448 recompute the addition for the computation of CC. */
2449 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2450 s->insn->data | MO_ALIGN);
2451 /* However, we need to recompute the operation for setting CC. */
2452 tcg_gen_or_i64(o->out, o->in1, o->in2);
2453 return NO_EXIT;
2456 static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2458 /* The real output is indeed the original value in memory;
2459 recompute the addition for the computation of CC. */
2460 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2461 s->insn->data | MO_ALIGN);
2462 /* However, we need to recompute the operation for setting CC. */
2463 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2464 return NO_EXIT;
/* Floating-point format conversions, all delegated to helpers.
   Naming follows the instruction mnemonics: e = short, d = long,
   x = extended; e.g. LDEB lengthens short to long. */
2467 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2469 gen_helper_ldeb(o->out, cpu_env, o->in2);
2470 return NO_EXIT;
2473 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2475 gen_helper_ledb(o->out, cpu_env, o->in2);
2476 return NO_EXIT;
/* Round extended to long; the 128-bit source is the pair (in1, in2). */
2479 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2481 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2482 return NO_EXIT;
/* Round extended to short; the 128-bit source is the pair (in1, in2). */
2485 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2487 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2488 return NO_EXIT;
/* Lengthen long to extended; low half of the result via return_low128. */
2491 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2493 gen_helper_lxdb(o->out, cpu_env, o->in2);
2494 return_low128(o->out2);
2495 return NO_EXIT;
/* Lengthen short to extended; low half of the result via return_low128. */
2498 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2500 gen_helper_lxeb(o->out, cpu_env, o->in2);
2501 return_low128(o->out2);
2502 return NO_EXIT;
/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 33-63 of the source. */
2505 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2507 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2508 return NO_EXIT;
/* Memory loads of various widths; in2 holds the address, the size and
   signedness are encoded in the tcg_gen_qemu_ld* variant used. */
2511 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2513 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2514 return NO_EXIT;
2517 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2519 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2520 return NO_EXIT;
2523 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2525 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2526 return NO_EXIT;
2529 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2531 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2532 return NO_EXIT;
2535 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2537 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2538 return NO_EXIT;
2541 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2543 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2544 return NO_EXIT;
2547 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2549 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2550 return NO_EXIT;
/* Load-and-trap family: perform the load/store, then trap (gen_trap)
   if the loaded value is zero.  In each case the branch skips the trap
   when the value is non-zero. */
2553 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2555 TCGLabel *lab = gen_new_label();
2556 store_reg32_i64(get_field(s->fields, r1), o->in2);
2557 /* The value is stored even in case of trap. */
2558 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2559 gen_trap(s);
2560 gen_set_label(lab);
2561 return NO_EXIT;
2564 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2566 TCGLabel *lab = gen_new_label();
2567 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2568 /* The value is stored even in case of trap. */
2569 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2570 gen_trap(s);
2571 gen_set_label(lab);
2572 return NO_EXIT;
/* As op_lat, but the value goes to the high half of the register. */
2575 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2577 TCGLabel *lab = gen_new_label();
2578 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2579 /* The value is stored even in case of trap. */
2580 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2581 gen_trap(s);
2582 gen_set_label(lab);
2583 return NO_EXIT;
2586 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2588 TCGLabel *lab = gen_new_label();
2589 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2590 /* The value is stored even in case of trap. */
2591 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2592 gen_trap(s);
2593 gen_set_label(lab);
2594 return NO_EXIT;
/* 31-bit variant: mask to 31 bits first, then trap on zero. */
2597 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2599 TCGLabel *lab = gen_new_label();
2600 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2601 /* The value is stored even in case of trap. */
2602 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2603 gen_trap(s);
2604 gen_set_label(lab);
2605 return NO_EXIT;
/* LOAD ON CONDITION: out = (m3 condition holds) ? in2 : in1,
   implemented branch-free with movcond. */
2608 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2610 DisasCompare c;
2612 disas_jcc(s, &c, get_field(s->fields, m3));
2614 if (c.is_64) {
2615 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2616 o->in2, o->in1);
2617 free_compare(&c);
2618 } else {
2619 TCGv_i32 t32 = tcg_temp_new_i32();
2620 TCGv_i64 t, z;
/* Materialize the 32-bit comparison as a 0/1 value ... */
2622 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2623 free_compare(&c);
2625 t = tcg_temp_new_i64();
2626 tcg_gen_extu_i32_i64(t, t32);
2627 tcg_temp_free_i32(t32);
/* ... then select on it with a 64-bit movcond against zero. */
2629 z = tcg_const_i64(0);
2630 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2631 tcg_temp_free_i64(t);
2632 tcg_temp_free_i64(z);
2635 return NO_EXIT;
2638 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; load control regs r1..r3 from in2. */
2639 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2641 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2642 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2643 check_privileged(s);
2644 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2645 tcg_temp_free_i32(r1);
2646 tcg_temp_free_i32(r3);
2647 return NO_EXIT;
/* LOAD CONTROL (64-bit): as op_lctl but via the lctlg helper. */
2650 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2652 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2653 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2654 check_privileged(s);
2655 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2656 tcg_temp_free_i32(r1);
2657 tcg_temp_free_i32(r3);
2658 return NO_EXIT;
/* LOAD REAL ADDRESS: privileged; helper translates and sets CC. */
2661 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2663 check_privileged(s);
2664 gen_helper_lra(o->out, cpu_env, o->in2);
2665 set_cc_static(s);
2666 return NO_EXIT;
/* Store in2 into the program-parameter field of the CPU state. */
2669 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2671 check_privileged(s);
2673 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2674 return NO_EXIT;
/* LOAD PSW: privileged; read a short (2 x 32-bit) PSW from memory and
   install it.  Never returns to this TB. */
2677 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2679 TCGv_i64 t1, t2;
2681 check_privileged(s);
2682 per_breaking_event(s);
2684 t1 = tcg_temp_new_i64();
2685 t2 = tcg_temp_new_i64();
2686 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2687 tcg_gen_addi_i64(o->in2, o->in2, 4);
2688 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2689 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2690 tcg_gen_shli_i64(t1, t1, 32);
2691 gen_helper_load_psw(cpu_env, t1, t2);
2692 tcg_temp_free_i64(t1);
2693 tcg_temp_free_i64(t2);
2694 return EXIT_NORETURN;
/* LOAD PSW EXTENDED: privileged; read a full 16-byte PSW from memory. */
2697 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2699 TCGv_i64 t1, t2;
2701 check_privileged(s);
2702 per_breaking_event(s);
2704 t1 = tcg_temp_new_i64();
2705 t2 = tcg_temp_new_i64();
2706 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2707 tcg_gen_addi_i64(o->in2, o->in2, 8);
2708 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2709 gen_helper_load_psw(cpu_env, t1, t2);
2710 tcg_temp_free_i64(t1);
2711 tcg_temp_free_i64(t2);
2712 return EXIT_NORETURN;
2714 #endif
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3 from in2. */
2716 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2718 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2719 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2720 gen_helper_lam(cpu_env, r1, o->in2, r3);
2721 tcg_temp_free_i32(r1);
2722 tcg_temp_free_i32(r3);
2723 return NO_EXIT;
/* LOAD MULTIPLE (32-bit): load registers r1..r3 (with wraparound mod 16)
   from consecutive words at in2.  The first and last words are loaded
   first so any page fault happens before any register is modified. */
2726 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2728 int r1 = get_field(s->fields, r1);
2729 int r3 = get_field(s->fields, r3);
2730 TCGv_i64 t1, t2;
2732 /* Only one register to read. */
2733 t1 = tcg_temp_new_i64();
2734 if (unlikely(r1 == r3)) {
2735 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2736 store_reg32_i64(r1, t1);
2737 tcg_temp_free(t1);
2738 return NO_EXIT;
2741 /* First load the values of the first and last registers to trigger
2742 possible page faults. */
2743 t2 = tcg_temp_new_i64();
2744 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2745 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2746 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2747 store_reg32_i64(r1, t1);
2748 store_reg32_i64(r3, t2);
2750 /* Only two registers to read. */
2751 if (((r1 + 1) & 15) == r3) {
2752 tcg_temp_free(t2);
2753 tcg_temp_free(t1);
2754 return NO_EXIT;
2757 /* Then load the remaining registers. Page fault can't occur. */
2758 r3 = (r3 - 1) & 15;
2759 tcg_gen_movi_i64(t2, 4);
2760 while (r1 != r3) {
2761 r1 = (r1 + 1) & 15;
2762 tcg_gen_add_i64(o->in2, o->in2, t2);
2763 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2764 store_reg32_i64(r1, t1);
2766 tcg_temp_free(t2);
2767 tcg_temp_free(t1);
2769 return NO_EXIT;
/* LOAD MULTIPLE HIGH: identical structure to op_lm32, but the loaded
   words go to the high halves of the registers (store_reg32h_i64). */
2772 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2774 int r1 = get_field(s->fields, r1);
2775 int r3 = get_field(s->fields, r3);
2776 TCGv_i64 t1, t2;
2778 /* Only one register to read. */
2779 t1 = tcg_temp_new_i64();
2780 if (unlikely(r1 == r3)) {
2781 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2782 store_reg32h_i64(r1, t1);
2783 tcg_temp_free(t1);
2784 return NO_EXIT;
2787 /* First load the values of the first and last registers to trigger
2788 possible page faults. */
2789 t2 = tcg_temp_new_i64();
2790 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2791 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2792 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2793 store_reg32h_i64(r1, t1);
2794 store_reg32h_i64(r3, t2);
2796 /* Only two registers to read. */
2797 if (((r1 + 1) & 15) == r3) {
2798 tcg_temp_free(t2);
2799 tcg_temp_free(t1);
2800 return NO_EXIT;
2803 /* Then load the remaining registers. Page fault can't occur. */
2804 r3 = (r3 - 1) & 15;
2805 tcg_gen_movi_i64(t2, 4);
2806 while (r1 != r3) {
2807 r1 = (r1 + 1) & 15;
2808 tcg_gen_add_i64(o->in2, o->in2, t2);
2809 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2810 store_reg32h_i64(r1, t1);
2812 tcg_temp_free(t2);
2813 tcg_temp_free(t1);
2815 return NO_EXIT;
/* LOAD MULTIPLE (64-bit): as op_lm32 but with 8-byte loads directly
   into the full registers; first/last are loaded first for fault
   ordering. */
2818 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2820 int r1 = get_field(s->fields, r1);
2821 int r3 = get_field(s->fields, r3);
2822 TCGv_i64 t1, t2;
2824 /* Only one register to read. */
2825 if (unlikely(r1 == r3)) {
2826 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2827 return NO_EXIT;
2830 /* First load the values of the first and last registers to trigger
2831 possible page faults. */
2832 t1 = tcg_temp_new_i64();
2833 t2 = tcg_temp_new_i64();
2834 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2835 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2836 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2837 tcg_gen_mov_i64(regs[r1], t1);
2838 tcg_temp_free(t2);
2840 /* Only two registers to read. */
2841 if (((r1 + 1) & 15) == r3) {
2842 tcg_temp_free(t1);
2843 return NO_EXIT;
2846 /* Then load the remaining registers. Page fault can't occur. */
2847 r3 = (r3 - 1) & 15;
2848 tcg_gen_movi_i64(t1, 8);
2849 while (r1 != r3) {
2850 r1 = (r1 + 1) & 15;
2851 tcg_gen_add_i64(o->in2, o->in2, t1);
2852 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2854 tcg_temp_free(t1);
2856 return NO_EXIT;
/* LOAD PAIR DISJOINT: load two operands from two independent addresses
   and report (via CC 0) that they were fetched interlocked.  With
   multiple CPUs running we cannot do the two loads atomically, so we
   punt to the single-stepping EXCP_ATOMIC slow path. */
2859 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2861 TCGv_i64 a1, a2;
2862 TCGMemOp mop = s->insn->data;
2864 /* In a parallel context, stop the world and single step. */
2865 if (parallel_cpus) {
2866 potential_page_fault(s);
2867 gen_exception(EXCP_ATOMIC);
2868 return EXIT_NORETURN;
2871 /* In a serial context, perform the two loads ... */
2872 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2873 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2874 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2875 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2876 tcg_temp_free_i64(a1);
2877 tcg_temp_free_i64(a2);
2879 /* ... and indicate that we performed them while interlocked. */
2880 gen_op_movi_cc(s, 0);
2881 return NO_EXIT;
/* LOAD PAIR FROM QUADWORD: helper performs the 16-byte load; the low
   64 bits come back via return_low128. */
2884 static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
2886 gen_helper_lpq(o->out, cpu_env, o->in2);
2887 return_low128(o->out2);
2888 return NO_EXIT;
2891 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged; bypasses DAT. */
2892 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2894 check_privileged(s);
2895 potential_page_fault(s);
2896 gen_helper_lura(o->out, cpu_env, o->in2);
2897 return NO_EXIT;
/* LOAD USING REAL ADDRESS (64-bit variant). */
2900 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2902 check_privileged(s);
2903 potential_page_fault(s);
2904 gen_helper_lurag(o->out, cpu_env, o->in2);
2905 return NO_EXIT;
2907 #endif
/* Generic move: steal in2 as the output (transferring ownership of the
   temp, including its "global" flag) instead of copying it. */
2909 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2911 o->out = o->in2;
2912 o->g_out = o->g_in2;
2913 TCGV_UNUSED_I64(o->in2);
2914 o->g_in2 = false;
2915 return NO_EXIT;
/* Move with ALET update: move in2 to out as in op_mov2, then set access
   register 1 according to the current addressing mode in the TB flags. */
2918 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2920 int b2 = get_field(s->fields, b2);
2921 TCGv ar1 = tcg_temp_new_i64();
2923 o->out = o->in2;
2924 o->g_out = o->g_in2;
2925 TCGV_UNUSED_I64(o->in2);
2926 o->g_in2 = false;
2928 switch (s->tb->flags & FLAG_MASK_ASC) {
2929 case PSW_ASC_PRIMARY >> 32:
2930 tcg_gen_movi_i64(ar1, 0);
2931 break;
2932 case PSW_ASC_ACCREG >> 32:
2933 tcg_gen_movi_i64(ar1, 1);
2934 break;
2935 case PSW_ASC_SECONDARY >> 32:
/* In secondary mode copy the base register's access register,
   falling back to 0 when there is no base register. */
2936 if (b2) {
2937 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2938 } else {
2939 tcg_gen_movi_i64(ar1, 0);
2941 break;
2942 case PSW_ASC_HOME >> 32:
2943 tcg_gen_movi_i64(ar1, 2);
2944 break;
2947 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2948 tcg_temp_free_i64(ar1);
2950 return NO_EXIT;
/* 128-bit move: steal both input temps as the output pair. */
2953 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2955 o->out = o->in1;
2956 o->out2 = o->in2;
2957 o->g_out = o->g_in1;
2958 o->g_out2 = o->g_in2;
2959 TCGV_UNUSED_I64(o->in1);
2960 TCGV_UNUSED_I64(o->in2);
2961 o->g_in1 = o->g_in2 = false;
2962 return NO_EXIT;
/* MOVE (character): helper copies l1+1 bytes from in2 to addr1. */
2965 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2967 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2968 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2969 tcg_temp_free_i32(l);
2970 return NO_EXIT;
/* MOVE INVERSE: as op_mvc but the helper reverses the byte order. */
2973 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
2975 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2976 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
2977 tcg_temp_free_i32(l);
2978 return NO_EXIT;
/* MOVE LONG: register pairs r1/r1+1 and r2/r2+1 describe the operands,
   so both register numbers must be even; otherwise raise a
   specification exception.  The helper does the copy and sets CC. */
2983 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2985 int r1 = get_field(s->fields, r1);
2986 int r2 = get_field(s->fields, r2);
2987 TCGv_i32 t1, t2;
2989 /* r1 and r2 must be even. */
2990 if (r1 & 1 || r2 & 1) {
2991 gen_program_exception(s, PGM_SPECIFICATION);
2992 return EXIT_NORETURN;
2995 t1 = tcg_const_i32(r1);
2996 t2 = tcg_const_i32(r2);
2997 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
2998 tcg_temp_free_i32(t1);
2999 tcg_temp_free_i32(t2);
3000 set_cc_static(s);
3001 return NO_EXIT;
/* MOVE LONG EXTENDED: same even-pair requirement, r1 and r3 pairs. */
3004 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3006 int r1 = get_field(s->fields, r1);
3007 int r3 = get_field(s->fields, r3);
3008 TCGv_i32 t1, t3;
3010 /* r1 and r3 must be even. */
3011 if (r1 & 1 || r3 & 1) {
3012 gen_program_exception(s, PGM_SPECIFICATION);
3013 return EXIT_NORETURN;
3016 t1 = tcg_const_i32(r1);
3017 t3 = tcg_const_i32(r3);
3018 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3019 tcg_temp_free_i32(t1);
3020 tcg_temp_free_i32(t3);
3021 set_cc_static(s);
3022 return NO_EXIT;
/* MOVE LONG UNICODE: same structure as op_mvcle. */
3025 static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3027 int r1 = get_field(s->fields, r1);
3028 int r3 = get_field(s->fields, r3);
3029 TCGv_i32 t1, t3;
3031 /* r1 and r3 must be even. */
3032 if (r1 & 1 || r3 & 1) {
3033 gen_program_exception(s, PGM_SPECIFICATION);
3034 return EXIT_NORETURN;
3037 t1 = tcg_const_i32(r1);
3038 t3 = tcg_const_i32(r3);
3039 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3040 tcg_temp_free_i32(t1);
3041 tcg_temp_free_i32(t3);
3042 set_cc_static(s);
3043 return NO_EXIT;
3044 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged; length comes from register l1.
   The helper sets CC. */
3045 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3047 int r1 = get_field(s->fields, l1);
3048 check_privileged(s);
3049 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3050 set_cc_static(s);
3051 return NO_EXIT;
/* MOVE TO SECONDARY: privileged; mirror of op_mvcp. */
3054 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3056 int r1 = get_field(s->fields, l1);
3057 check_privileged(s);
3058 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3059 set_cc_static(s);
3060 return NO_EXIT;
3062 #endif
/* MOVE NUMERICS: helper moves the low digits of l1+1 bytes. */
3064 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3066 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3067 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3068 tcg_temp_free_i32(l);
3069 return NO_EXIT;
/* MOVE WITH OFFSET: delegated to the mvo helper. */
3072 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3074 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3075 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3076 tcg_temp_free_i32(l);
3077 return NO_EXIT;
/* MOVE PAGE: helper uses r0 as well; CC comes back in cc_op. */
3080 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3082 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3083 set_cc_static(s);
3084 return NO_EXIT;
/* MOVE STRING: helper returns the updated first address in o->in1 and
   the updated second address in the low-128 slot (o->in2). */
3087 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3089 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3090 set_cc_static(s);
3091 return_low128(o->in2);
3092 return NO_EXIT;
/* MOVE ZONES: helper moves the high digits of l1+1 bytes. */
3095 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3097 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3098 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3099 tcg_temp_free_i32(l);
3100 return NO_EXIT;
/* Integer multiply, low 64 bits of the product. */
3103 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3105 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3106 return NO_EXIT;
/* 64x64 -> 128 unsigned multiply; high half in out, low in out2. */
3109 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3111 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3112 return NO_EXIT;
/* BFP multiplies, delegated to helpers (e = short, d = long, x = ext). */
3115 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3117 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3118 return NO_EXIT;
3121 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3123 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3124 return NO_EXIT;
3127 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3129 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3130 return NO_EXIT;
/* Extended BFP multiply: 128-bit operands as pairs; low result half
   retrieved via return_low128. */
3133 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3135 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3136 return_low128(o->out2);
3137 return NO_EXIT;
3140 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3142 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3143 return_low128(o->out2);
3144 return NO_EXIT;
/* Fused multiply-add/subtract (short/long BFP); the third operand is
   the r3 float register. */
3147 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3149 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3150 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3151 tcg_temp_free_i64(r3);
3152 return NO_EXIT;
3155 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3157 int r3 = get_field(s->fields, r3);
3158 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3159 return NO_EXIT;
3162 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3164 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3165 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3166 tcg_temp_free_i64(r3);
3167 return NO_EXIT;
3170 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3172 int r3 = get_field(s->fields, r3);
3173 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3174 return NO_EXIT;
3177 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3179 TCGv_i64 z, n;
3180 z = tcg_const_i64(0);
3181 n = tcg_temp_new_i64();
3182 tcg_gen_neg_i64(n, o->in2);
3183 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3184 tcg_temp_free_i64(n);
3185 tcg_temp_free_i64(z);
3186 return NO_EXIT;
/* Float load-negative: force the sign bit on directly in the integer
   representation (short / long / extended). */
3189 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3191 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3192 return NO_EXIT;
3195 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3197 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3198 return NO_EXIT;
/* Extended: sign bit lives in the high half (in1); low half copied. */
3201 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3203 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3204 tcg_gen_mov_i64(o->out2, o->in2);
3205 return NO_EXIT;
/* AND (character): helper ANDs l1+1 bytes and computes CC. */
3208 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3210 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3211 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3212 tcg_temp_free_i32(l);
3213 set_cc_static(s);
3214 return NO_EXIT;
/* Integer two's-complement negation. */
3217 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3219 tcg_gen_neg_i64(o->out, o->in2);
3220 return NO_EXIT;
/* Float load-complement: flip the sign bit in the integer view. */
3223 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3225 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3226 return NO_EXIT;
3229 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3231 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3232 return NO_EXIT;
3235 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3237 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3238 tcg_gen_mov_i64(o->out2, o->in2);
3239 return NO_EXIT;
/* OR (character): helper ORs l1+1 bytes and computes CC. */
3242 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3244 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3245 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3246 tcg_temp_free_i32(l);
3247 set_cc_static(s);
3248 return NO_EXIT;
/* Plain 64-bit OR. */
3251 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3253 tcg_gen_or_i64(o->out, o->in1, o->in2);
3254 return NO_EXIT;
/* OR immediate into a sub-field: insn->data packs the field size in the
   high byte and the shift in the low byte.  CC is derived only from the
   bits that were actually touched. */
3257 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3259 int shift = s->insn->data & 0xff;
3260 int size = s->insn->data >> 8;
3261 uint64_t mask = ((1ull << size) - 1) << shift;
3263 assert(!o->g_in2);
3264 tcg_gen_shli_i64(o->in2, o->in2, shift);
3265 tcg_gen_or_i64(o->out, o->in1, o->in2);
3267 /* Produce the CC from only the bits manipulated. */
3268 tcg_gen_andi_i64(cc_dst, o->out, mask);
3269 set_cc_nz_u64(s, cc_dst);
3270 return NO_EXIT;
/* PACK: helper packs zoned decimal digits; length from l1. */
3273 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3275 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3276 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3277 tcg_temp_free_i32(l);
3278 return NO_EXIT;
/* PACK ASCII: source length is l2+1 and is limited to 32 bytes;
   anything larger is a specification exception. */
3281 static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3283 int l2 = get_field(s->fields, l2) + 1;
3284 TCGv_i32 l;
3286 /* The length must not exceed 32 bytes. */
3287 if (l2 > 32) {
3288 gen_program_exception(s, PGM_SPECIFICATION);
3289 return EXIT_NORETURN;
3291 l = tcg_const_i32(l2);
3292 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3293 tcg_temp_free_i32(l);
3294 return NO_EXIT;
/* PACK UNICODE: length must be even and at most 64 bytes. */
3297 static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3299 int l2 = get_field(s->fields, l2) + 1;
3300 TCGv_i32 l;
3302 /* The length must be even and should not exceed 64 bytes. */
3303 if ((l2 & 1) || (l2 > 64)) {
3304 gen_program_exception(s, PGM_SPECIFICATION);
3305 return EXIT_NORETURN;
3307 l = tcg_const_i32(l2);
3308 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3309 tcg_temp_free_i32(l);
3310 return NO_EXIT;
/* POPULATION COUNT, delegated to a helper. */
3313 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3315 gen_helper_popcnt(o->out, o->in2);
3316 return NO_EXIT;
3319 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; helper flushes the translation lookaside
   buffer state. */
3320 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3322 check_privileged(s);
3323 gen_helper_ptlb(cpu_env);
3324 return NO_EXIT;
3326 #endif
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate in2
   left by i5, then insert the bit range i3..i4 into out, keeping or
   zeroing the remaining bits depending on the zero flag (i4 bit 0x80).
   The high/low variants operate only on one 32-bit half (pmask).
   Fast paths use extract/deposit when the masks line up. */
3328 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3330 int i3 = get_field(s->fields, i3);
3331 int i4 = get_field(s->fields, i4);
3332 int i5 = get_field(s->fields, i5);
3333 int do_zero = i4 & 0x80;
3334 uint64_t mask, imask, pmask;
3335 int pos, len, rot;
3337 /* Adjust the arguments for the specific insn. */
3338 switch (s->fields->op2) {
3339 case 0x55: /* risbg */
3340 i3 &= 63;
3341 i4 &= 63;
3342 pmask = ~0;
3343 break;
3344 case 0x5d: /* risbhg */
3345 i3 &= 31;
3346 i4 &= 31;
3347 pmask = 0xffffffff00000000ull;
3348 break;
3349 case 0x51: /* risblg */
3350 i3 &= 31;
3351 i4 &= 31;
3352 pmask = 0x00000000ffffffffull;
3353 break;
3354 default:
3355 abort();
3358 /* MASK is the set of bits to be inserted from R2.
3359 Take care for I3/I4 wraparound. */
3360 mask = pmask >> i3;
3361 if (i3 <= i4) {
3362 mask ^= pmask >> i4 >> 1;
3363 } else {
3364 mask |= ~(pmask >> i4 >> 1);
3366 mask &= pmask;
3368 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3369 insns, we need to keep the other half of the register. */
3370 imask = ~mask | ~pmask;
3371 if (do_zero) {
3372 if (s->fields->op2 == 0x55) {
3373 imask = 0;
3374 } else {
3375 imask = ~pmask;
3379 len = i4 - i3 + 1;
3380 pos = 63 - i4;
3381 rot = i5 & 63;
3382 if (s->fields->op2 == 0x5d) {
3383 pos += 32;
3386 /* In some cases we can implement this with extract. */
3387 if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
3388 tcg_gen_extract_i64(o->out, o->in2, rot, len);
3389 return NO_EXIT;
3392 /* In some cases we can implement this with deposit. */
3393 if (len > 0 && (imask == 0 || ~mask == imask)) {
3394 /* Note that we rotate the bits to be inserted to the lsb, not to
3395 the position as described in the PoO. */
3396 rot = (rot - pos) & 63;
3397 } else {
3398 pos = -1;
3401 /* Rotate the input as necessary. */
3402 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3404 /* Insert the selected bits into the output. */
3405 if (pos >= 0) {
3406 if (imask == 0) {
3407 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3408 } else {
3409 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3411 } else if (imask == 0) {
3412 tcg_gen_andi_i64(o->out, o->in2, mask);
3413 } else {
/* General fallback: mask the rotated input, keep the preserved
   bits of out, and combine. */
3414 tcg_gen_andi_i64(o->in2, o->in2, mask);
3415 tcg_gen_andi_i64(o->out, o->out, imask);
3416 tcg_gen_or_i64(o->out, o->out, o->in2);
3418 return NO_EXIT;
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate
   in2 by i5, combine the selected bit range i3..i4 into out with the
   boolean op chosen by op2, and set CC from the selected bits.  If i3
   bit 0x80 is set this is the test-only form and the result is
   discarded into a fresh temp. */
3421 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3423 int i3 = get_field(s->fields, i3);
3424 int i4 = get_field(s->fields, i4);
3425 int i5 = get_field(s->fields, i5);
3426 uint64_t mask;
3428 /* If this is a test-only form, arrange to discard the result. */
3429 if (i3 & 0x80) {
3430 o->out = tcg_temp_new_i64();
3431 o->g_out = false;
3434 i3 &= 63;
3435 i4 &= 63;
3436 i5 &= 63;
3438 /* MASK is the set of bits to be operated on from R2.
3439 Take care for I3/I4 wraparound. */
3440 mask = ~0ull >> i3;
3441 if (i3 <= i4) {
3442 mask ^= ~0ull >> i4 >> 1;
3443 } else {
3444 mask |= ~(~0ull >> i4 >> 1);
3447 /* Rotate the input as necessary. */
3448 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3450 /* Operate. */
3451 switch (s->fields->op2) {
3452 case 0x55: /* AND */
/* Bits outside the mask must not clear anything: force them to 1. */
3453 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3454 tcg_gen_and_i64(o->out, o->out, o->in2);
3455 break;
3456 case 0x56: /* OR */
3457 tcg_gen_andi_i64(o->in2, o->in2, mask);
3458 tcg_gen_or_i64(o->out, o->out, o->in2);
3459 break;
3460 case 0x57: /* XOR */
3461 tcg_gen_andi_i64(o->in2, o->in2, mask);
3462 tcg_gen_xor_i64(o->out, o->out, o->in2);
3463 break;
3464 default:
3465 abort();
3468 /* Set the CC. */
3469 tcg_gen_andi_i64(cc_dst, o->out, mask);
3470 set_cc_nz_u64(s, cc_dst);
3471 return NO_EXIT;
/* Byte-reversal (load-reversed family) at 16/32/64-bit width. */
3474 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3476 tcg_gen_bswap16_i64(o->out, o->in2);
3477 return NO_EXIT;
3480 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3482 tcg_gen_bswap32_i64(o->out, o->in2);
3483 return NO_EXIT;
3486 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3488 tcg_gen_bswap64_i64(o->out, o->in2);
3489 return NO_EXIT;
3492 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3494 TCGv_i32 t1 = tcg_temp_new_i32();
3495 TCGv_i32 t2 = tcg_temp_new_i32();
3496 TCGv_i32 to = tcg_temp_new_i32();
3497 tcg_gen_extrl_i64_i32(t1, o->in1);
3498 tcg_gen_extrl_i64_i32(t2, o->in2);
3499 tcg_gen_rotl_i32(to, t1, t2);
3500 tcg_gen_extu_i32_i64(o->out, to);
3501 tcg_temp_free_i32(t1);
3502 tcg_temp_free_i32(t2);
3503 tcg_temp_free_i32(to);
3504 return NO_EXIT;
/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
3507 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3509 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3510 return NO_EXIT;
3513 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; CC from the helper. */
3514 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3516 check_privileged(s);
3517 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3518 set_cc_static(s);
3519 return NO_EXIT;
/* SET ADDRESS SPACE CONTROL FAST: privileged; must end the TB because
   subsequent code would be translated with a stale addressing mode. */
3522 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3524 check_privileged(s);
3525 gen_helper_sacf(cpu_env, o->in2);
3526 /* Addressing mode has changed, so end the block. */
3527 return EXIT_PC_STALE;
3529 #endif
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): insn->data selects the mode.
   Checks the current PC against the new mode's address range, masks
   next_pc, and updates the AM bits in psw_mask. */
3531 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3533 int sam = s->insn->data;
3534 TCGv_i64 tsam;
3535 uint64_t mask;
3537 switch (sam) {
3538 case 0:
/* 24-bit mode. */
3539 mask = 0xffffff;
3540 break;
3541 case 1:
/* 31-bit mode. */
3542 mask = 0x7fffffff;
3543 break;
3544 default:
/* 64-bit mode: all addresses valid. */
3545 mask = -1;
3546 break;
3549 /* Bizarre but true, we check the address of the current insn for the
3550 specification exception, not the next to be executed. Thus the PoO
3551 documents that Bad Things Happen two bytes before the end. */
3552 if (s->pc & ~mask) {
3553 gen_program_exception(s, PGM_SPECIFICATION);
3554 return EXIT_NORETURN;
3556 s->next_pc &= mask;
/* Write the new addressing-mode bits (PSW mask bits 31-32). */
3558 tsam = tcg_const_i64(sam);
3559 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3560 tcg_temp_free_i64(tsam);
3562 /* Always exit the TB, since we (may have) changed execution mode. */
3563 return EXIT_PC_STALE;
/* SET ACCESS REGISTER: store in2 into access register r1. */
3566 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3568 int r1 = get_field(s->fields, r1);
3569 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3570 return NO_EXIT;
/* BFP subtract (short / long / extended), via helpers. */
3573 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3575 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3576 return NO_EXIT;
3579 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3581 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3582 return NO_EXIT;
/* Extended subtract: 128-bit operands as (out,out2) and (in1,in2). */
3585 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3587 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3588 return_low128(o->out2);
3589 return NO_EXIT;
/* BFP square root (short / long / extended), via helpers. */
3592 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3594 gen_helper_sqeb(o->out, cpu_env, o->in2);
3595 return NO_EXIT;
3598 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3600 gen_helper_sqdb(o->out, cpu_env, o->in2);
3601 return NO_EXIT;
3604 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3606 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3607 return_low128(o->out2);
3608 return NO_EXIT;
3611 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; CC comes back from the helper. */
3612 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3614 check_privileged(s);
3615 potential_page_fault(s);
3616 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3617 set_cc_static(s);
3618 return NO_EXIT;
/* SIGNAL PROCESSOR: privileged; order code / parameters passed to the
   helper, which sets CC. */
3621 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3623 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3624 check_privileged(s);
3625 potential_page_fault(s);
3626 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3627 set_cc_static(s);
3628 tcg_temp_free_i32(r1);
3629 return NO_EXIT;
3631 #endif
/* STORE ON CONDITION: store r1 (32 or 64 bits per insn->data) at
   b2+d2 only when the m3 condition holds; implemented by branching
   around the store when the inverted condition is true. */
3633 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3635 DisasCompare c;
3636 TCGv_i64 a;
3637 TCGLabel *lab;
3638 int r1;
3640 disas_jcc(s, &c, get_field(s->fields, m3));
3642 /* We want to store when the condition is fulfilled, so branch
3643 out when it's not */
3644 c.cond = tcg_invert_cond(c.cond);
3646 lab = gen_new_label();
3647 if (c.is_64) {
3648 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3649 } else {
3650 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3652 free_compare(&c);
3654 r1 = get_field(s->fields, r1);
3655 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3656 if (s->insn->data) {
3657 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3658 } else {
3659 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3661 tcg_temp_free_i64(a);
3663 gen_set_label(lab);
3664 return NO_EXIT;
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63), which also selects the CC computation. */
3667 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3669 uint64_t sign = 1ull << s->insn->data;
3670 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3671 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3672 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3673 /* The arithmetic left shift is curious in that it does not affect
3674 the sign bit. Copy that over from the source unchanged. */
3675 tcg_gen_andi_i64(o->out, o->out, ~sign);
3676 tcg_gen_andi_i64(o->in1, o->in1, sign);
3677 tcg_gen_or_i64(o->out, o->out, o->in1);
3678 return NO_EXIT;
/* Logical / arithmetic shifts, straight to the TCG primitives. */
3681 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3683 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3684 return NO_EXIT;
3687 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3689 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3690 return NO_EXIT;
3693 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3695 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3696 return NO_EXIT;
/* SET FPC: install a new floating-point control register. */
3699 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3701 gen_helper_sfpc(cpu_env, o->in2);
3702 return NO_EXIT;
/* SET FPC AND SIGNAL, via helper. */
3705 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3707 gen_helper_sfas(cpu_env, o->in2);
3708 return NO_EXIT;
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): compute the new mode value
   from b2+d2, deposit it into the appropriate FPC field (position and
   width depend on the opcode), then reinstall the FPC via sfpc so
   fpu_status picks up the new rounding mode. */
3711 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3713 int b2 = get_field(s->fields, b2);
3714 int d2 = get_field(s->fields, d2);
3715 TCGv_i64 t1 = tcg_temp_new_i64();
3716 TCGv_i64 t2 = tcg_temp_new_i64();
3717 int mask, pos, len;
3719 switch (s->fields->op2) {
3720 case 0x99: /* SRNM */
3721 pos = 0, len = 2;
3722 break;
3723 case 0xb8: /* SRNMB */
3724 pos = 0, len = 3;
3725 break;
3726 case 0xb9: /* SRNMT */
3727 pos = 4, len = 3;
3728 break;
3729 default:
3730 tcg_abort();
3732 mask = (1 << len) - 1;
3734 /* Insert the value into the appropriate field of the FPC. */
3735 if (b2 == 0) {
/* No base register: the mode is the immediate displacement. */
3736 tcg_gen_movi_i64(t1, d2 & mask);
3737 } else {
3738 tcg_gen_addi_i64(t1, regs[b2], d2);
3739 tcg_gen_andi_i64(t1, t1, mask);
3741 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3742 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3743 tcg_temp_free_i64(t1);
3745 /* Then install the new FPC to set the rounding mode in fpu_status. */
3746 gen_helper_sfpc(cpu_env, t2);
3747 tcg_temp_free_i64(t2);
3748 return NO_EXIT;
3751 #ifndef CONFIG_USER_ONLY
3752 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3754 check_privileged(s);
3755 tcg_gen_shri_i64(o->in2, o->in2, 4);
3756 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
3757 return NO_EXIT;
3760 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3762 check_privileged(s);
3763 gen_helper_sske(cpu_env, o->in1, o->in2);
3764 return NO_EXIT;
3767 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3769 check_privileged(s);
3770 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3771 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3772 return EXIT_PC_STALE_NOCHAIN;
3775 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3777 check_privileged(s);
3778 /* ??? Surely cpu address != cpu number. In any case the previous
3779 version of this stored more than the required half-word, so it
3780 is unlikely this has ever been tested. */
3781 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3782 return NO_EXIT;
3785 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3787 gen_helper_stck(o->out, cpu_env);
3788 /* ??? We don't implement clock states. */
3789 gen_op_movi_cc(s, 0);
3790 return NO_EXIT;
3793 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3795 TCGv_i64 c1 = tcg_temp_new_i64();
3796 TCGv_i64 c2 = tcg_temp_new_i64();
3797 gen_helper_stck(c1, cpu_env);
3798 /* Shift the 64-bit value into its place as a zero-extended
3799 104-bit value. Note that "bit positions 64-103 are always
3800 non-zero so that they compare differently to STCK"; we set
3801 the least significant bit to 1. */
3802 tcg_gen_shli_i64(c2, c1, 56);
3803 tcg_gen_shri_i64(c1, c1, 8);
3804 tcg_gen_ori_i64(c2, c2, 0x10000);
3805 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3806 tcg_gen_addi_i64(o->in2, o->in2, 8);
3807 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3808 tcg_temp_free_i64(c1);
3809 tcg_temp_free_i64(c2);
3810 /* ??? We don't implement clock states. */
3811 gen_op_movi_cc(s, 0);
3812 return NO_EXIT;
3815 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3817 check_privileged(s);
3818 gen_helper_sckc(cpu_env, o->in2);
3819 return NO_EXIT;
3822 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3824 check_privileged(s);
3825 gen_helper_stckc(o->out, cpu_env);
3826 return NO_EXIT;
3829 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3831 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3832 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3833 check_privileged(s);
3834 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3835 tcg_temp_free_i32(r1);
3836 tcg_temp_free_i32(r3);
3837 return NO_EXIT;
3840 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3842 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3843 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3844 check_privileged(s);
3845 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3846 tcg_temp_free_i32(r1);
3847 tcg_temp_free_i32(r3);
3848 return NO_EXIT;
3851 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3853 check_privileged(s);
3854 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
3855 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
3856 return NO_EXIT;
3859 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3861 check_privileged(s);
3862 gen_helper_spt(cpu_env, o->in2);
3863 return NO_EXIT;
3866 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3868 check_privileged(s);
3869 gen_helper_stfl(cpu_env);
3870 return NO_EXIT;
3873 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3875 check_privileged(s);
3876 gen_helper_stpt(o->out, cpu_env);
3877 return NO_EXIT;
3880 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3882 check_privileged(s);
3883 potential_page_fault(s);
3884 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3885 set_cc_static(s);
3886 return NO_EXIT;
3889 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3891 check_privileged(s);
3892 gen_helper_spx(cpu_env, o->in2);
3893 return NO_EXIT;
3896 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3898 check_privileged(s);
3899 potential_page_fault(s);
3900 gen_helper_xsch(cpu_env, regs[1]);
3901 set_cc_static(s);
3902 return NO_EXIT;
3905 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3907 check_privileged(s);
3908 potential_page_fault(s);
3909 gen_helper_csch(cpu_env, regs[1]);
3910 set_cc_static(s);
3911 return NO_EXIT;
3914 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3916 check_privileged(s);
3917 potential_page_fault(s);
3918 gen_helper_hsch(cpu_env, regs[1]);
3919 set_cc_static(s);
3920 return NO_EXIT;
3923 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3925 check_privileged(s);
3926 potential_page_fault(s);
3927 gen_helper_msch(cpu_env, regs[1], o->in2);
3928 set_cc_static(s);
3929 return NO_EXIT;
3932 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3934 check_privileged(s);
3935 potential_page_fault(s);
3936 gen_helper_rchp(cpu_env, regs[1]);
3937 set_cc_static(s);
3938 return NO_EXIT;
3941 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3943 check_privileged(s);
3944 potential_page_fault(s);
3945 gen_helper_rsch(cpu_env, regs[1]);
3946 set_cc_static(s);
3947 return NO_EXIT;
3950 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3952 check_privileged(s);
3953 potential_page_fault(s);
3954 gen_helper_ssch(cpu_env, regs[1], o->in2);
3955 set_cc_static(s);
3956 return NO_EXIT;
3959 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3961 check_privileged(s);
3962 potential_page_fault(s);
3963 gen_helper_stsch(cpu_env, regs[1], o->in2);
3964 set_cc_static(s);
3965 return NO_EXIT;
3968 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3970 check_privileged(s);
3971 potential_page_fault(s);
3972 gen_helper_tsch(cpu_env, regs[1], o->in2);
3973 set_cc_static(s);
3974 return NO_EXIT;
3977 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3979 check_privileged(s);
3980 potential_page_fault(s);
3981 gen_helper_chsc(cpu_env, o->in2);
3982 set_cc_static(s);
3983 return NO_EXIT;
3986 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3988 check_privileged(s);
3989 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3990 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3991 return NO_EXIT;
3994 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3996 uint64_t i2 = get_field(s->fields, i2);
3997 TCGv_i64 t;
3999 check_privileged(s);
4001 /* It is important to do what the instruction name says: STORE THEN.
4002 If we let the output hook perform the store then if we fault and
4003 restart, we'll have the wrong SYSTEM MASK in place. */
4004 t = tcg_temp_new_i64();
4005 tcg_gen_shri_i64(t, psw_mask, 56);
4006 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4007 tcg_temp_free_i64(t);
4009 if (s->fields->op == 0xac) {
4010 tcg_gen_andi_i64(psw_mask, psw_mask,
4011 (i2 << 56) | 0x00ffffffffffffffull);
4012 } else {
4013 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4016 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4017 return EXIT_PC_STALE_NOCHAIN;
4020 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
4022 check_privileged(s);
4023 potential_page_fault(s);
4024 gen_helper_stura(cpu_env, o->in2, o->in1);
4025 return NO_EXIT;
4028 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
4030 check_privileged(s);
4031 potential_page_fault(s);
4032 gen_helper_sturg(cpu_env, o->in2, o->in1);
4033 return NO_EXIT;
4035 #endif
4037 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4039 potential_page_fault(s);
4040 gen_helper_stfle(cc_op, cpu_env, o->in2);
4041 set_cc_static(s);
4042 return NO_EXIT;
4045 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4047 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4048 return NO_EXIT;
4051 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4053 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4054 return NO_EXIT;
4057 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4059 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4060 return NO_EXIT;
4063 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4065 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4066 return NO_EXIT;
4069 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4071 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4072 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4073 gen_helper_stam(cpu_env, r1, o->in2, r3);
4074 tcg_temp_free_i32(r1);
4075 tcg_temp_free_i32(r3);
4076 return NO_EXIT;
4079 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4081 int m3 = get_field(s->fields, m3);
4082 int pos, base = s->insn->data;
4083 TCGv_i64 tmp = tcg_temp_new_i64();
4085 pos = base + ctz32(m3) * 8;
4086 switch (m3) {
4087 case 0xf:
4088 /* Effectively a 32-bit store. */
4089 tcg_gen_shri_i64(tmp, o->in1, pos);
4090 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4091 break;
4093 case 0xc:
4094 case 0x6:
4095 case 0x3:
4096 /* Effectively a 16-bit store. */
4097 tcg_gen_shri_i64(tmp, o->in1, pos);
4098 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4099 break;
4101 case 0x8:
4102 case 0x4:
4103 case 0x2:
4104 case 0x1:
4105 /* Effectively an 8-bit store. */
4106 tcg_gen_shri_i64(tmp, o->in1, pos);
4107 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4108 break;
4110 default:
4111 /* This is going to be a sequence of shifts and stores. */
4112 pos = base + 32 - 8;
4113 while (m3) {
4114 if (m3 & 0x8) {
4115 tcg_gen_shri_i64(tmp, o->in1, pos);
4116 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4117 tcg_gen_addi_i64(o->in2, o->in2, 1);
4119 m3 = (m3 << 1) & 0xf;
4120 pos -= 8;
4122 break;
4124 tcg_temp_free_i64(tmp);
4125 return NO_EXIT;
4128 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4130 int r1 = get_field(s->fields, r1);
4131 int r3 = get_field(s->fields, r3);
4132 int size = s->insn->data;
4133 TCGv_i64 tsize = tcg_const_i64(size);
4135 while (1) {
4136 if (size == 8) {
4137 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4138 } else {
4139 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4141 if (r1 == r3) {
4142 break;
4144 tcg_gen_add_i64(o->in2, o->in2, tsize);
4145 r1 = (r1 + 1) & 15;
4148 tcg_temp_free_i64(tsize);
4149 return NO_EXIT;
4152 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4154 int r1 = get_field(s->fields, r1);
4155 int r3 = get_field(s->fields, r3);
4156 TCGv_i64 t = tcg_temp_new_i64();
4157 TCGv_i64 t4 = tcg_const_i64(4);
4158 TCGv_i64 t32 = tcg_const_i64(32);
4160 while (1) {
4161 tcg_gen_shl_i64(t, regs[r1], t32);
4162 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4163 if (r1 == r3) {
4164 break;
4166 tcg_gen_add_i64(o->in2, o->in2, t4);
4167 r1 = (r1 + 1) & 15;
4170 tcg_temp_free_i64(t);
4171 tcg_temp_free_i64(t4);
4172 tcg_temp_free_i64(t32);
4173 return NO_EXIT;
4176 static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
4178 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4179 return NO_EXIT;
4182 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4184 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
4185 set_cc_static(s);
4186 return_low128(o->in2);
4187 return NO_EXIT;
4190 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4192 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4193 return NO_EXIT;
4196 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4198 DisasCompare cmp;
4199 TCGv_i64 borrow;
4201 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4203 /* The !borrow flag is the msb of CC. Since we want the inverse of
4204 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4205 disas_jcc(s, &cmp, 8 | 4);
4206 borrow = tcg_temp_new_i64();
4207 if (cmp.is_64) {
4208 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4209 } else {
4210 TCGv_i32 t = tcg_temp_new_i32();
4211 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4212 tcg_gen_extu_i32_i64(borrow, t);
4213 tcg_temp_free_i32(t);
4215 free_compare(&cmp);
4217 tcg_gen_sub_i64(o->out, o->out, borrow);
4218 tcg_temp_free_i64(borrow);
4219 return NO_EXIT;
4222 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4224 TCGv_i32 t;
4226 update_psw_addr(s);
4227 update_cc_op(s);
4229 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4230 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4231 tcg_temp_free_i32(t);
4233 t = tcg_const_i32(s->ilen);
4234 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4235 tcg_temp_free_i32(t);
4237 gen_exception(EXCP_SVC);
4238 return EXIT_NORETURN;
4241 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4243 int cc = 0;
4245 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4246 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4247 gen_op_movi_cc(s, cc);
4248 return NO_EXIT;
4251 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4253 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4254 set_cc_static(s);
4255 return NO_EXIT;
4258 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4260 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4261 set_cc_static(s);
4262 return NO_EXIT;
4265 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4267 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4268 set_cc_static(s);
4269 return NO_EXIT;
4272 #ifndef CONFIG_USER_ONLY
4274 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4276 check_privileged(s);
4277 gen_helper_testblock(cc_op, cpu_env, o->in2);
4278 set_cc_static(s);
4279 return NO_EXIT;
4282 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4284 gen_helper_tprot(cc_op, o->addr1, o->in2);
4285 set_cc_static(s);
4286 return NO_EXIT;
4289 #endif
4291 static ExitStatus op_tp(DisasContext *s, DisasOps *o)
4293 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4294 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4295 tcg_temp_free_i32(l1);
4296 set_cc_static(s);
4297 return NO_EXIT;
4300 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4302 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4303 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4304 tcg_temp_free_i32(l);
4305 set_cc_static(s);
4306 return NO_EXIT;
4309 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4311 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4312 return_low128(o->out2);
4313 set_cc_static(s);
4314 return NO_EXIT;
4317 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4319 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4320 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4321 tcg_temp_free_i32(l);
4322 set_cc_static(s);
4323 return NO_EXIT;
4326 static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
4328 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4329 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4330 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4331 TCGv_i32 tst = tcg_temp_new_i32();
4332 int m3 = get_field(s->fields, m3);
4334 /* XXX: the C bit in M3 should be considered as 0 when the
4335 ETF2-enhancement facility is not installed. */
4336 if (m3 & 1) {
4337 tcg_gen_movi_i32(tst, -1);
4338 } else {
4339 tcg_gen_extrl_i64_i32(tst, regs[0]);
4340 if (s->insn->opc & 3) {
4341 tcg_gen_ext8u_i32(tst, tst);
4342 } else {
4343 tcg_gen_ext16u_i32(tst, tst);
4346 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4348 tcg_temp_free_i32(r1);
4349 tcg_temp_free_i32(r2);
4350 tcg_temp_free_i32(sizes);
4351 tcg_temp_free_i32(tst);
4352 set_cc_static(s);
4353 return NO_EXIT;
4356 static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4358 TCGv_i32 t1 = tcg_const_i32(0xff);
4359 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4360 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4361 tcg_temp_free_i32(t1);
4362 set_cc_static(s);
4363 return NO_EXIT;
4366 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4368 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4369 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4370 tcg_temp_free_i32(l);
4371 return NO_EXIT;
4374 static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
4376 int l1 = get_field(s->fields, l1) + 1;
4377 TCGv_i32 l;
4379 /* The length must not exceed 32 bytes. */
4380 if (l1 > 32) {
4381 gen_program_exception(s, PGM_SPECIFICATION);
4382 return EXIT_NORETURN;
4384 l = tcg_const_i32(l1);
4385 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4386 tcg_temp_free_i32(l);
4387 set_cc_static(s);
4388 return NO_EXIT;
4391 static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
4393 int l1 = get_field(s->fields, l1) + 1;
4394 TCGv_i32 l;
4396 /* The length must be even and should not exceed 64 bytes. */
4397 if ((l1 & 1) || (l1 > 64)) {
4398 gen_program_exception(s, PGM_SPECIFICATION);
4399 return EXIT_NORETURN;
4401 l = tcg_const_i32(l1);
4402 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4403 tcg_temp_free_i32(l);
4404 set_cc_static(s);
4405 return NO_EXIT;
4409 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4411 int d1 = get_field(s->fields, d1);
4412 int d2 = get_field(s->fields, d2);
4413 int b1 = get_field(s->fields, b1);
4414 int b2 = get_field(s->fields, b2);
4415 int l = get_field(s->fields, l1);
4416 TCGv_i32 t32;
4418 o->addr1 = get_address(s, 0, b1, d1);
4420 /* If the addresses are identical, this is a store/memset of zero. */
4421 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4422 o->in2 = tcg_const_i64(0);
4424 l++;
4425 while (l >= 8) {
4426 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4427 l -= 8;
4428 if (l > 0) {
4429 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4432 if (l >= 4) {
4433 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4434 l -= 4;
4435 if (l > 0) {
4436 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4439 if (l >= 2) {
4440 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4441 l -= 2;
4442 if (l > 0) {
4443 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4446 if (l) {
4447 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4449 gen_op_movi_cc(s, 0);
4450 return NO_EXIT;
4453 /* But in general we'll defer to a helper. */
4454 o->in2 = get_address(s, 0, b2, d2);
4455 t32 = tcg_const_i32(l);
4456 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4457 tcg_temp_free_i32(t32);
4458 set_cc_static(s);
4459 return NO_EXIT;
4462 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4464 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4465 return NO_EXIT;
4468 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4470 int shift = s->insn->data & 0xff;
4471 int size = s->insn->data >> 8;
4472 uint64_t mask = ((1ull << size) - 1) << shift;
4474 assert(!o->g_in2);
4475 tcg_gen_shli_i64(o->in2, o->in2, shift);
4476 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4478 /* Produce the CC from only the bits manipulated. */
4479 tcg_gen_andi_i64(cc_dst, o->out, mask);
4480 set_cc_nz_u64(s, cc_dst);
4481 return NO_EXIT;
4484 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4486 o->out = tcg_const_i64(0);
4487 return NO_EXIT;
4490 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4492 o->out = tcg_const_i64(0);
4493 o->out2 = o->out;
4494 o->g_out2 = true;
4495 return NO_EXIT;
4498 /* ====================================================================== */
4499 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4500 the original inputs), update the various cc data structures in order to
4501 be able to compute the new condition code. */
4503 static void cout_abs32(DisasContext *s, DisasOps *o)
4505 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4508 static void cout_abs64(DisasContext *s, DisasOps *o)
4510 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4513 static void cout_adds32(DisasContext *s, DisasOps *o)
4515 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4518 static void cout_adds64(DisasContext *s, DisasOps *o)
4520 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4523 static void cout_addu32(DisasContext *s, DisasOps *o)
4525 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4528 static void cout_addu64(DisasContext *s, DisasOps *o)
4530 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4533 static void cout_addc32(DisasContext *s, DisasOps *o)
4535 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4538 static void cout_addc64(DisasContext *s, DisasOps *o)
4540 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4543 static void cout_cmps32(DisasContext *s, DisasOps *o)
4545 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4548 static void cout_cmps64(DisasContext *s, DisasOps *o)
4550 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4553 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4555 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4558 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4560 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4563 static void cout_f32(DisasContext *s, DisasOps *o)
4565 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4568 static void cout_f64(DisasContext *s, DisasOps *o)
4570 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4573 static void cout_f128(DisasContext *s, DisasOps *o)
4575 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4578 static void cout_nabs32(DisasContext *s, DisasOps *o)
4580 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4583 static void cout_nabs64(DisasContext *s, DisasOps *o)
4585 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4588 static void cout_neg32(DisasContext *s, DisasOps *o)
4590 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4593 static void cout_neg64(DisasContext *s, DisasOps *o)
4595 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4598 static void cout_nz32(DisasContext *s, DisasOps *o)
4600 tcg_gen_ext32u_i64(cc_dst, o->out);
4601 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4604 static void cout_nz64(DisasContext *s, DisasOps *o)
4606 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4609 static void cout_s32(DisasContext *s, DisasOps *o)
4611 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4614 static void cout_s64(DisasContext *s, DisasOps *o)
4616 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4619 static void cout_subs32(DisasContext *s, DisasOps *o)
4621 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4624 static void cout_subs64(DisasContext *s, DisasOps *o)
4626 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4629 static void cout_subu32(DisasContext *s, DisasOps *o)
4631 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4634 static void cout_subu64(DisasContext *s, DisasOps *o)
4636 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4639 static void cout_subb32(DisasContext *s, DisasOps *o)
4641 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4644 static void cout_subb64(DisasContext *s, DisasOps *o)
4646 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4649 static void cout_tm32(DisasContext *s, DisasOps *o)
4651 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4654 static void cout_tm64(DisasContext *s, DisasOps *o)
4656 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4659 /* ====================================================================== */
4660 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4661 with the TCG register to which we will write. Used in combination with
4662 the "wout" generators, in some cases we need a new temporary, and in
4663 some cases we can write to a TCG global. */
4665 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4667 o->out = tcg_temp_new_i64();
4669 #define SPEC_prep_new 0
4671 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4673 o->out = tcg_temp_new_i64();
4674 o->out2 = tcg_temp_new_i64();
4676 #define SPEC_prep_new_P 0
4678 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4680 o->out = regs[get_field(f, r1)];
4681 o->g_out = true;
4683 #define SPEC_prep_r1 0
4685 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4687 int r1 = get_field(f, r1);
4688 o->out = regs[r1];
4689 o->out2 = regs[r1 + 1];
4690 o->g_out = o->g_out2 = true;
4692 #define SPEC_prep_r1_P SPEC_r1_even
4694 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4696 o->out = fregs[get_field(f, r1)];
4697 o->g_out = true;
4699 #define SPEC_prep_f1 0
4701 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4703 int r1 = get_field(f, r1);
4704 o->out = fregs[r1];
4705 o->out2 = fregs[r1 + 2];
4706 o->g_out = o->g_out2 = true;
4708 #define SPEC_prep_x1 SPEC_r1_f128
4710 /* ====================================================================== */
4711 /* The "Write OUTput" generators. These generally perform some non-trivial
4712 copy of data to TCG globals, or to main memory. The trivial cases are
4713 generally handled by having a "prep" generator install the TCG global
4714 as the destination of the operation. */
4716 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4718 store_reg(get_field(f, r1), o->out);
4720 #define SPEC_wout_r1 0
4722 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4724 int r1 = get_field(f, r1);
4725 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4727 #define SPEC_wout_r1_8 0
4729 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4731 int r1 = get_field(f, r1);
4732 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4734 #define SPEC_wout_r1_16 0
4736 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4738 store_reg32_i64(get_field(f, r1), o->out);
4740 #define SPEC_wout_r1_32 0
4742 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4744 store_reg32h_i64(get_field(f, r1), o->out);
4746 #define SPEC_wout_r1_32h 0
4748 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4750 int r1 = get_field(f, r1);
4751 store_reg32_i64(r1, o->out);
4752 store_reg32_i64(r1 + 1, o->out2);
4754 #define SPEC_wout_r1_P32 SPEC_r1_even
4756 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4758 int r1 = get_field(f, r1);
4759 store_reg32_i64(r1 + 1, o->out);
4760 tcg_gen_shri_i64(o->out, o->out, 32);
4761 store_reg32_i64(r1, o->out);
4763 #define SPEC_wout_r1_D32 SPEC_r1_even
4765 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4767 int r3 = get_field(f, r3);
4768 store_reg32_i64(r3, o->out);
4769 store_reg32_i64(r3 + 1, o->out2);
4771 #define SPEC_wout_r3_P32 SPEC_r3_even
4773 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4775 int r3 = get_field(f, r3);
4776 store_reg(r3, o->out);
4777 store_reg(r3 + 1, o->out2);
4779 #define SPEC_wout_r3_P64 SPEC_r3_even
4781 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4783 store_freg32_i64(get_field(f, r1), o->out);
4785 #define SPEC_wout_e1 0
4787 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4789 store_freg(get_field(f, r1), o->out);
4791 #define SPEC_wout_f1 0
4793 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4795 int f1 = get_field(s->fields, r1);
4796 store_freg(f1, o->out);
4797 store_freg(f1 + 2, o->out2);
4799 #define SPEC_wout_x1 SPEC_r1_f128
4801 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4803 if (get_field(f, r1) != get_field(f, r2)) {
4804 store_reg32_i64(get_field(f, r1), o->out);
4807 #define SPEC_wout_cond_r1r2_32 0
4809 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4811 if (get_field(f, r1) != get_field(f, r2)) {
4812 store_freg32_i64(get_field(f, r1), o->out);
4815 #define SPEC_wout_cond_e1e2 0
4817 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4819 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4821 #define SPEC_wout_m1_8 0
4823 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4825 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4827 #define SPEC_wout_m1_16 0
4829 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4831 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4833 #define SPEC_wout_m1_32 0
4835 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4837 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4839 #define SPEC_wout_m1_64 0
4841 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4843 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4845 #define SPEC_wout_m2_32 0
4847 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4849 store_reg(get_field(f, r1), o->in2);
4851 #define SPEC_wout_in2_r1 0
4853 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4855 store_reg32_i64(get_field(f, r1), o->in2);
4857 #define SPEC_wout_in2_r1_32 0
4859 /* ====================================================================== */
4860 /* The "INput 1" generators. These load the first operand to an insn. */
4862 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4864 o->in1 = load_reg(get_field(f, r1));
4866 #define SPEC_in1_r1 0
4868 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4870 o->in1 = regs[get_field(f, r1)];
4871 o->g_in1 = true;
4873 #define SPEC_in1_r1_o 0
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r1, sign-extended to 64 bits.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r1, zero-extended to 64 bits.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* High 32 bits of r1, shifted down into the low half.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* The odd register of the r1 pair; SPEC enforces that r1 is even.  */
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r1+1, sign-extended.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r1+1, zero-extended.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 64-bit doubleword from the even/odd r1 pair: r1 supplies the
       high 32 bits, r1+1 the low 32 bits.  */
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* High 32 bits of r2, shifted down into the low half.  */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Use the r3 global directly; g_in1 tells translate_one not to
       free it as a temporary.  */
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 64-bit doubleword from the even/odd r3 pair (high from r3,
       low from r3+1).  */
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Short (32-bit) float operand from freg r1.  */
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Long (64-bit) float operand: the freg global itself.  */
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Extended (128-bit) float in the r1/r1+2 freg pair.  NOTE(review):
       despite the in1_ name this fills the *output* slots out/out2 --
       presumably for insns that update the x1 pair in place; confirm
       against the users in insn-data.def.  */
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Effective address of the first operand, from b1/d1 (no index).  */
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Effective address built from the second-operand fields
       (x2/b2/d2), but stored in addr1.  */
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 8-bit unsigned load from the first-operand address.  */
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 16-bit signed load from the first-operand address.  */
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 16-bit unsigned load from the first-operand address.  */
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 32-bit signed load from the first-operand address.  */
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 32-bit unsigned load from the first-operand address.  */
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 64-bit load from the first-operand address.  */
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5062 /* ====================================================================== */
5063 /* The "INput 2" generators. These load the second operand to an insn. */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Use the r1 global directly; g_in2 tells translate_one not to
       free it as a temporary.  */
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 16 bits of r1, zero-extended.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r1, zero-extended.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 64-bit doubleword from the even/odd r1 pair (high from r1,
       low from r1+1).  */
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Load r2 only when the field is non-zero; otherwise in2 is left
       unset (r2 == 0 means "no register" for these insns).  */
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 8 bits of r2, sign-extended.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 8 bits of r2, zero-extended.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 16 bits of r2, sign-extended.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 16 bits of r2, zero-extended.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* High 32 bits of r3, shifted down into the low half.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r2, sign-extended.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Low 32 bits of r2, zero-extended.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* High 32 bits of r2, shifted down into the low half.  */
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Short (32-bit) float operand from freg r2.  */
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Long (64-bit) float operand: the freg global itself.  */
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Extended (128-bit) float: the r2/r2+2 freg pair fills both
       in1 (high) and in2 (low).  */
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Address taken directly from register r2 (no index or
       displacement).  */
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Effective address of the second operand from x2/b2/d2; x2 is
       absent in some formats.  */
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* PC-relative address: i2 counts halfwords from the insn address.  */
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Shift amount masked to 0..31.  */
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Shift amount masked to 0..63.  */
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 8-bit unsigned load from the second-operand address.  */
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 16-bit signed load from the second-operand address.  */
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 16-bit unsigned load from the second-operand address.  */
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 32-bit signed load from the second-operand address.  */
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 32-bit unsigned load from the second-operand address.  */
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 64-bit load from the second-operand address.  */
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 16-bit unsigned load from a PC-relative address.  */
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 32-bit signed load from a PC-relative address.  */
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 32-bit unsigned load from a PC-relative address.  */
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 64-bit load from a PC-relative address.  */
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Immediate i2 as-is (already sign-extended by field extraction).  */
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Immediate i2 truncated to 8 unsigned bits.  */
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Immediate i2 truncated to 16 unsigned bits.  */
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* Immediate i2 truncated to 32 unsigned bits.  */
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 16-bit unsigned immediate shifted left by the per-insn data
       amount (e.g. for the *HIGH immediate insns).  */
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* 32-bit unsigned immediate shifted left by the per-insn data
       amount.  */
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    /* The raw (left-aligned) instruction bits, for system-mode helpers
       that need to re-examine the original encoding.  */
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
5347 /* ====================================================================== */
5349 /* Find opc within the table of insns. This is formulated as a switch
5350 statement so that (1) we get compile-time notice of cut-paste errors
5351 for duplicated opcodes, and (2) the compiler generates the binary
5352 search tree, rather than us having to post-process the table. */
/* Pass 1 over insn-data.def: define an enum of insn names, used below
   to index insn_info[].  The C macro forwards to D with a zero DATA
   argument for entries that carry no per-insn data.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Pass 2: expand each entry into a DisasInsn initializer, binding the
   operand generator helpers and OR-ing together their specification-
   exception requirements.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
    .opc = OPC, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities. */
#define FAC_Z S390_FEAT_ZARCH
#define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_CASS2 S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2
#define FAC_DFP S390_FEAT_DFP
#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE S390_FEAT_EXECUTE_EXT
#define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
#define FAC_HW S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LD S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE S390_FEAT_STFLE
#define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH S390_FEAT_DAT_ENH
#define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2

static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Pass 3: expand each entry into a switch case mapping the 16-bit
   (major << 8 | minor) opcode to its insn_info[] element.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
5444 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5446 uint32_t r, m;
5448 if (f->size == 0) {
5449 return;
5452 /* Zero extract the field from the insn. */
5453 r = (insn << f->beg) >> (64 - f->size);
5455 /* Sign-extend, or un-swap the field as necessary. */
5456 switch (f->type) {
5457 case 0: /* unsigned */
5458 break;
5459 case 1: /* signed */
5460 assert(f->size <= 32);
5461 m = 1u << (f->size - 1);
5462 r = (r ^ m) - m;
5463 break;
5464 case 2: /* dl+dh split, signed 20 bit. */
5465 r = ((int8_t)r << 12) | (r >> 8);
5466 break;
5467 default:
5468 abort();
5471 /* Validate that the "compressed" encoding we selected above is valid.
5472 I.e. we havn't make two different original fields overlap. */
5473 assert(((o->presentC >> f->indexC) & 1) == 0);
5474 o->presentC |= 1 << f->indexC;
5475 o->presentO |= 1 << f->indexO;
5477 o->c[f->indexC] = r;
5480 /* Lookup the insn at the current PC, extracting the operands into O and
5481 returning the info struct for the insn. Returns NULL for invalid insn. */
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    /* Fetch and decode the insn at s->pc (or the one saved by EXECUTE),
       fill F with its raw bits, opcodes and extracted operand fields,
       and return its table entry -- NULL for an unknown opcode.  */
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths. */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE. */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* The two high bits of the first halfword encode the insn
           length; fetch the remaining halfwords accordingly and
           left-align the whole insn in the 64-bit word.  */
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->next_pc = s->pc + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode in the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode in the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction. */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands. */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }

    return info;
}
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    /* Translate a single guest insn at s->pc: decode it, validate its
       specification-exception constraints, run its operand/op/writeback
       helpers in order, free the temporaries, and advance s->pc.  */
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* Record a PER instruction-fetch event for this address.  */
        TCGv_i64 addr = tcg_const_i64(s->pc);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* Check for insn specification exceptions.  The SPEC_* bits were
       collected from the operand generators when the table was built. */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit float register pairs must name a valid pair base
           (0, 1, 4, 5, 8, 9, 12, 13 -- i.e. not above 13). */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each helper is optional; the order
       (inputs, prep, op, writeback, cc) is fixed. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       slots that alias globals and must not be freed. */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->next_pc);
        }

        /* Save off cc. */
        update_cc_op(s);

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->pc = s->next_pc;
    return ret;
}
void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
{
    /* Translate one TB: repeatedly call translate_one until an exit
       condition is reached, then emit the appropriate TB epilogue.  */
    S390CPU *cpu = s390_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int num_insns, max_insns;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    /* A non-zero cs_base carries the operands saved by EXECUTE.  */
    dc.ex_value = tb->cs_base;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    do {
        tcg_gen_insn_start(dc.pc, dc.cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
            status = EXIT_PC_STALE;
            do_debug = true;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing. */
            dc.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        status = translate_one(env, &dc);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled
                || dc.ex_value)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
    case EXIT_PC_STALE_NOCHAIN:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* FALLTHRU */
    case EXIT_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(psw_addr);
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc.pc - pc_start;
    tb->icount = num_insns;

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        if (unlikely(dc.ex_value)) {
            /* ??? Unfortunately log_target_disas can't use host memory. */
            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
        } else {
            qemu_log("IN: %s\n", lookup_symbol(pc_start));
            log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
            qemu_log("\n");
        }
        qemu_log_unlock();
    }
#endif
}
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    /* Restore PSW address and cc state from the values recorded by
       tcg_gen_insn_start (data[0] = pc, data[1] = cc_op).  */
    int cc_op = data[1];
    env->psw.addr = data[0];
    /* DYNAMIC/STATIC cc ops mean env->cc_op already holds the truth;
       don't overwrite it with a placeholder.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}